| repo_name (stringlengths 5-92) | path (stringlengths 4-221) | copies (stringclasses, 19 values) | size (stringlengths 4-6) | content (stringlengths 766-896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 32 to 997) | alpha_frac (float64, 0.25 to 0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5 to 13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Xreki/Xreki.github.io
|
fluid/inference/inference_transpiler.py
|
1
|
1937
|
import os
import sys
import argparse
import paddle.fluid as fluid
def Transpile(src_dir, dst_dir, model_filename, params_filename):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
print "Loading inference_program from ", src_dir
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(src_dir, exe, model_filename, params_filename)
inference_transpiler_program = inference_program.clone()
# NOTE: Applying the inference transpiler will change the inference_transpiler_program.
t = fluid.InferenceTranspiler()
# Under the with statement, inference_scope is the global scope.
t.transpile(inference_transpiler_program, place)
#print inference_transpiler_program
print "Saving the optimized inference_program to ", dst_dir
# There is a bug in fluid.io.save_inference_model, so we can use the following code instead.
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
model_path = os.path.join(dst_dir, model_filename)
with open(model_path, "wb") as f:
f.write(inference_transpiler_program.desc.serialize_to_string())
fluid.io.save_persistables(exe, dst_dir, inference_transpiler_program, params_filename)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_dir', help='Source directory of inference model')
parser.add_argument('--dst_dir', help='Destination directory of inference model')
parser.add_argument('--model_filename', default=None, help='The name of model file')
parser.add_argument('--params_filename', default=None, help='The name of params file')
args = parser.parse_args()
Transpile(args.src_dir, args.dst_dir, args.model_filename, args.params_filename)
if __name__ == '__main__':
main()
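# Example invocation (illustrative sketch; the model/params file names are
# assumptions and depend on how the inference model was saved):
#   python inference_transpiler.py --src_dir=/path/to/model --dst_dir=/path/to/optimized_model \
#       --model_filename=__model__ --params_filename=__params__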
|
gpl-3.0
| -8,947,622,634,226,983,000
| 41.108696
| 102
| 0.690243
| false
| 3.761165
| false
| false
| false
|
minireference/noBSLAnotebooks
|
aspynb/Linear_algebra_chapters_overview.py
|
1
|
23478
|
def cells():
'''
# Linear algebra overview
'''
'''
'''
'''
Linear algebra is the study of **vectors** and **linear transformations**. This notebook introduces concepts from linear algebra in a bird's-eye overview. The goal is not to get into the details, but to give the reader a taste of the different types of thinking (computational, geometrical, and theoretical) used in linear algebra.
'''
'''
'''
'''
## Chapters overview
- 1/ Math fundamentals
- 2/ Intro to linear algebra
- Vectors
- Matrices
- Matrix-vector product representation of linear transformations
- Linear property: $f(a\mathbf{x} + b\mathbf{y}) = af(\mathbf{x}) + bf(\mathbf{y})$
- 3/ Computational linear algebra
- Gauss-Jordan elimination procedure
- Augmented matrix representation of systems of linear equations
- Reduced row echelon form
- Matrix equations
- Matrix operations
- Matrix product
- Determinant
- Matrix inverse
- 4/ Geometrical linear algebra
- Points, lines, and planes
- Projection operation
- Coordinates
- Vector spaces
- Vector space techniques
- 5/ Linear transformations
- Vector functions
- Input and output spaces
- Matrix representation of linear transformations
- Column space and row spaces of matrix representations
- Invertible matrix theorem
- 6/ Theoretical linear algebra
- Eigenvalues and eigenvectors
- Special types of matrices
- Abstract vector spaces
- Abstract inner product spaces
- Gram–Schmidt orthogonalization
- Matrix decompositions
- Linear algebra with complex numbers
- 7/ Applications
- 8/ Probability theory
- 9/ Quantum mechanics
- Notation appendix
'''
'''
'''
# helper code needed for running in colab
if 'google.colab' in str(get_ipython()):
print('Downloading plot_helpers.py to util/ (only needed for Colab)')
!mkdir util; wget https://raw.githubusercontent.com/minireference/noBSLAnotebooks/master/util/plot_helpers.py -P util
'''
'''
# setup SymPy
from sympy import *
x, y, z, t = symbols('x y z t')
init_printing()
# a vector is a special type of matrix (an n-vector is either an nx1 or a 1xn matrix)
Vector = Matrix # define alias Vector so I don't have to explain this during video
Point = Vector # define alias Point for Vector since they're the same thing
# setup plotting
%matplotlib inline
import matplotlib.pyplot as mpl
from util.plot_helpers import plot_vec, plot_vecs, plot_line, plot_plane, autoscale_arrows
'''
'''
'''
# 1/ Math fundamentals
'''
'''
'''
'''
Linear algebra builds upon high school math concepts like:
- Numbers (integers, rationals, reals, complex numbers)
- Functions ($f(x)$ takes an input $x$ and produces an output $y$)
- Basic rules of algebra
- Geometry (lines, curves, areas, triangles)
- The Cartesian plane
'''
'''
'''
'''
'''
'''
# 2/ Intro to linear algebra
Linear algebra is the study of vectors and matrices.
'''
'''
'''
'''
## Vectors
'''
'''
'''
# define two vectors
u = Vector([2,3])
v = Vector([3,0])
u
'''
'''
v
'''
'''
plot_vecs(u, v)
autoscale_arrows()
'''
'''
'''
## Vector operations
'''
'''
'''
'''
- Addition (denoted $\vec{u}+\vec{v}$)
- Subtraction, the inverse of addition (denoted $\vec{u}-\vec{v}$)
- Scaling (denoted $\alpha \vec{u}$)
- Dot product (denoted $\vec{u} \cdot \vec{v}$)
- Cross product (denoted $\vec{u} \times \vec{v}$)
'''
'''
'''
'''
### Vector addition
'''
'''
'''
# algebraic
u+v
'''
'''
# graphical
plot_vecs(u, v)
plot_vec(v, at=u, color='b')
plot_vec(u+v, color='r')
autoscale_arrows()
'''
'''
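# Illustrative cell (not in the original notebook): scaling and the dot product
# for the vectors u and v defined above. The cross product is only defined for
# three-dimensional vectors, so two fresh vectors are used for that part.
2*u, u.dot(v), Vector([1,0,0]).cross(Vector([0,1,0]))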
'''
### Basis
When we describe the vector as the coordinate pair $(4,6)$, we're implicitly using the *standard basis* $B_s = \{ \hat{\imath}, \hat{\jmath} \}$. The vector $\hat{\imath} \equiv (1,0)$ is a unit-length vector in the $x$-direction,
and $\hat{\jmath} \equiv (0,1)$ is a unit-length vector in the $y$-direction.
To be more precise when referring to vectors, we can indicate the basis as a subscript of every coordinate vector $\vec{v}=(4,6)_{B_s}$, which tells us $\vec{v}= 4\hat{\imath}+6\hat{\jmath}=4(1,0) +6(0,1)$.
'''
'''
'''
# the standard basis
ihat = Vector([1,0])
jhat = Vector([0,1])
v = 4*ihat + 6*jhat
v
'''
'''
# geometrically...
plot_vecs(ihat, jhat, 4*ihat, 6*jhat, v)
autoscale_arrows()
'''
'''
'''
The same vector $\vec{v}$ will correspond to a different pair of coefficients if a different basis is used.
For example, if we use the basis $B^\prime = \{ (1,1), (1,-1) \}$, the same vector $\vec{v}$ must be expressed as $\vec{v} = 5\vec{b}_1 +(-1)\vec{b}_2=(5,-1)_{B^\prime}$.
'''
'''
'''
# another basis B' = { (1,1), (1,-1) }
b1 = Vector([ 1, 1])
b2 = Vector([ 1, -1])
v = 5*b1 + (-1)*b2
v
# How did I know 5 and -1 are the coefficients w.r.t basis {b1,b2}?
# Matrix([[1,1],[1,-1]]).inv()*Vector([4,6])
'''
'''
# geometrically...
plot_vecs(b1, b2, 5*b1, -1*b2, v)
autoscale_arrows()
'''
'''
'''
'''
'''
'''
'''
'''
'''
'''
'''
## Matrix operations
'''
'''
'''
'''
- Addition (denoted $A+B$)
- Subtraction, the inverse of addition (denoted $A-B$)
- Scaling by a constant $\alpha$ (denoted $\alpha A$)
- Matrix-vector product (denoted $A\vec{x}$, related to linear transformations)
- Matrix product (denoted $AB$)
- Matrix inverse (denoted $A^{-1}$)
- Trace (denoted $\textrm{Tr}(A)$)
- Determinant (denoted $\textrm{det}(A)$ or $|A|$)
'''
'''
'''
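# Demo cell added for illustration: the matrix operations listed above, computed
# with SymPy on two small matrices (A and B here are made-up examples).
A = Matrix([[1,2],[3,4]])
B = Matrix([[0,1],[1,0]])
A+B, A-B, 2*A, A*Vector([1,1]), A*B, A.inv(), A.trace(), A.det()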
'''
In linear algebra we'll extend the notion of function $f:\mathbb{R}\to \mathbb{R}$ to functions that act on vectors, called *linear transformations*. We can understand the properties of linear transformations $T$ in analogy with ordinary functions:
\begin{align*}
\textrm{function }
f:\mathbb{R}\to \mathbb{R}
& \ \Leftrightarrow \,
\begin{array}{l}
\textrm{linear transformation }
T:\mathbb{R}^{n}\! \to \mathbb{R}^{m}
\end{array} \\
\textrm{input } x\in \mathbb{R}
& \ \Leftrightarrow \
\textrm{input } \vec{x} \in \mathbb{R}^n \\
\textrm{output } f(x) \in \mathbb{R}
& \ \Leftrightarrow \
\textrm{output } T(\vec{x})\in \mathbb{R}^m \\
g\circ\! f \: (x) = g(f(x))
& \ \Leftrightarrow \
% \textrm{matrix product }
S(T(\vec{x})) \\
\textrm{function inverse } f^{-1}
& \ \Leftrightarrow \
\textrm{inverse transformation } T^{-1} \\
\textrm{zeros of } f
& \ \Leftrightarrow \
\textrm{kernel of } T \\
\textrm{image of } f
& \ \Leftrightarrow \
\begin{array}{l}
\textrm{image of } T
\end{array}
\end{align*}
'''
'''
'''
'''
## Linear property
$$
T(a\mathbf{x}_1 + b\mathbf{x}_2) = aT(\mathbf{x}_1) + bT(\mathbf{x}_2)
$$
'''
'''
'''
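# Added check of the linear property: with the matrix A playing the role of T,
# T(a*x1 + b*x2) - (a*T(x1) + b*T(x2)) expands to the zero vector.
# The matrix and vectors are illustrative choices.
a, b = symbols('a b')
A = Matrix([[1,2],[3,4]])
x1 = Vector([1,0])
x2 = Vector([2,1])
(A*(a*x1 + b*x2) - (a*A*x1 + b*A*x2)).expand()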
'''
## Matrix-vector product representation of linear transformations
'''
'''
'''
'''
Equivalence between linear transformations $T$ and matrices $M_T$:
$$
T : \mathbb{R}^n \to \mathbb{R}^m
\qquad
\Leftrightarrow
\qquad
M_T \in \mathbb{R}^{m \times n}
$$
$$
\vec{y} = T(\vec{x})
\qquad
\Leftrightarrow
\qquad
\vec{y} = M_T\vec{x}
$$
'''
'''
'''
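# Added illustration: a 2x3 matrix M_T maps input vectors in R^3 to output
# vectors in R^2, i.e. it represents some linear transformation T: R^3 -> R^2.
# The entries are made up for the example.
M_T = Matrix([[1,0,2],[0,1,3]])
x = Vector([1,1,1])
M_T*x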
'''
'''
'''
'''
'''
'''
'''
'''
'''
# 3/ Computational linear algebra
'''
'''
'''
'''
## Gauss-Jordan elimination procedure
Suppose you're asked to solve for $x_1$ and $x_2$ in the following system of equations
\begin{align*}
1x_1 + 2x_2 &= 5 \\
3x_1 + 9x_2 &= 21.
\end{align*}
'''
'''
'''
# represent as an augmented matrix
AUG = Matrix([
[1, 2, 5],
[3, 9, 21]])
AUG
'''
'''
# eliminate x_1 in the second equation by subtracting 3 times the first equation
AUG[1,:] = AUG[1,:] - 3*AUG[0,:]
AUG
'''
'''
# simplify second equation by dividing by 3
AUG[1,:] = AUG[1,:]/3
AUG
'''
'''
# eliminate x_2 from the first equation by subtracting 2 times the second equation
AUG[0,:] = AUG[0,:] - 2*AUG[1,:]
AUG
'''
'''
'''
This augmented matrix is in *reduced row echelon form* (RREF), and corresponds to the system of equations:
\begin{align*}
1x_1 \ \ \qquad &= 1 \\
1x_2 &= 2,
\end{align*}
so the solution is $x_1=1$ and $x_2=2$.
'''
'''
'''
'''
## Matrix equations
'''
'''
'''
'''
See **page 177** in v2.2 of the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Matrix product
'''
'''
'''
a,b,c,d,e,f, g,h,i,j = symbols('a b c d e f g h i j')
A = Matrix([[a,b],
[c,d],
[e,f]])
B = Matrix([[g,h],
[i,j]])
A, B
'''
'''
A*B
'''
'''
def mat_prod(A, B):
"""Compute the matrix product of matrices A and B."""
assert A.cols == B.rows, "Error: matrix dimensions not compatible."
m, ell = A.shape # A is a m x ell matrix
ell, n = B.shape # B is a ell x n matrix
C = zeros(m,n)
for i in range(0,m):
for j in range(0,n):
C[i,j] = A[i,:].dot(B[:,j])
return C
mat_prod(A,B)
'''
'''
# mat_prod(B,A)
'''
'''
'''
## Determinant
'''
'''
'''
a, b, c, d = symbols('a b c d')
A = Matrix([[a,b],
[c,d]])
A.det()
'''
'''
# Consider the parallelogram with sides:
u1 = Vector([3,0])
u2 = Vector([2,2])
plot_vecs(u1,u2)
plot_vec(u1, at=u2, color='k')
plot_vec(u2, at=u1, color='b')
autoscale_arrows()
# What is the area of this parallelogram?
'''
'''
# base = 3, height = 2, so area is 6
'''
'''
# Compute the area of the parallelogram with sides u1 and u2 using the determinant
A = Matrix([[3,0],
[2,2]])
A.det()
'''
'''
'''
'''
'''
'''
'''
## Matrix inverse
For an invertible matrix $A$, the matrix inverse $A^{-1}$ acts to undo the effects of $A$:
$$
A^{-1} A \vec{v} = \vec{v}.
$$
The effect of applying $A$ followed by $A^{-1}$ (or the other way around) is the identity transformation:
$$
A^{-1}A \ = \ \mathbb{1} \ = \ AA^{-1}.
$$
'''
'''
'''
A = Matrix([[1, 2],
[3, 9]])
A
'''
'''
# Compute the determinant to check if the inverse matrix exists
A.det()
'''
'''
'''
The determinant is non-zero, so the inverse exists.
'''
'''
'''
A.inv()
'''
'''
A.inv()*A
'''
'''
'''
### Adjugate-matrix formula
The *adjugate matrix* of the matrix $A$ is obtained by replacing each entry of the matrix with a partial determinant calculation (called *minors*). The minor $M_{ij}$ is the determinant of $A$ with its $i$th row and $j$th column removed.
'''
'''
'''
A.adjugate() / A.det()
'''
'''
'''
### Augmented matrix approach
$$
\left[ \, A \, | \, \mathbb{1} \, \right]
\qquad
-\textrm{Gauss-Jordan elimination}\rightarrow
\qquad
\left[ \, \mathbb{1} \, | \, A^{-1} \, \right]
$$
'''
'''
'''
AUG = A.row_join(eye(2))
AUG
'''
'''
# perform row operations until left side of AUG is in RREF
AUG[1,:] = AUG[1,:] - 3*AUG[0,:]
AUG[1,:] = AUG[1,:]/3
AUG[0,:] = AUG[0,:] - 2*AUG[1,:]
AUG
'''
'''
# the inverse of A is in the right side of RREF(AUG)
AUG[:,2:5] # == A-inverse
'''
'''
# verify A times A-inverse gives the identity matrix...
A*AUG[:,2:5]
'''
'''
'''
### Using elementary matrices
Each row operation $\mathcal{R}_i$ can be represented as an elementary matrix $E_i$. The elementary matrix of a given row operation is obtained by performing the row operation on the identity matrix.
'''
'''
'''
E1 = eye(2)
E1[1,:] = E1[1,:] - 3*E1[0,:]
E2 = eye(2)
E2[1,:] = E2[1,:]/3
E3 = eye(2)
E3[0,:] = E3[0,:] - 2*E3[1,:]
E1, E2, E3
'''
'''
# the sequence of three row operations transforms the matrix A into RREF
E3*E2*E1*A
'''
'''
'''
Recall definition $A^{-1}A=\mathbb{1}$, and we just observed that $E_3E_2E_1 A =\mathbb{1}$, so it must be that $A^{-1}=E_3E_2E_1$.
'''
'''
'''
E3*E2*E1
'''
'''
'''
'''
'''
'''
'''
# 4/ Geometrical linear algebra
Points, lines, and planes are geometrical objects that are conveniently expressed using the language of vectors.
'''
'''
'''
'''
## Points
A point $p=(p_x,p_y,p_z)$ refers to a single location in $\mathbb{R}^3$.
'''
'''
'''
p = Point([2,4,5])
p
'''
'''
'''
## Lines
A line is a one-dimensional infinite subset of $\mathbb{R}^3$ that can be described as
$$
\ell: \{ p_o + \alpha \vec{v} \ | \ \forall \alpha \in \mathbb{R} \}.
$$
'''
'''
'''
po = Point([1,1,1])
v = Vector([1,1,0])
plot_line(v, po)
'''
'''
'''
## Planes
A plane is a two-dimensional infinite subset of $\mathbb{R}^3$ that can be described in one of three ways:
The *general equation*:
$$
P: \left\{ \, Ax+By+Cz=D \, \right\}
$$
The *parametric equation*:
$$
P: \{ p_{\textrm{o}}+s\,\vec{v} + t\,\vec{w}, \ \forall s,t \in \mathbb{R} \},
$$
which defines a plane that contains the point $p_{\textrm{o}}$ and the vectors $\vec{v}$ and $\vec{w}$.
Or the *geometric equation*:
$$
P: \left\{ \vec{n} \cdot [ (x,y,z) - p_{\textrm{o}} ] = 0 \,\right\},
$$
which defines a plane that contains point $p_{\textrm{o}}$ and has normal vector $\hat{n}$.
'''
'''
'''
# plot plane 2x + 1y + 1z = 5
normal = Vector([2, 1, 1])
D = 5
plot_plane(normal, D)
'''
'''
'''
'''
'''
'''
'''
## Projection operation
'''
'''
'''
'''
A projection of the vector $\vec{v}$ in the direction $\vec{d}$ is denoted $\Pi_{\vec{d}}(\vec{v})$. The formula for computing the projection uses the dot product operation:
$$
\Pi_{\vec{d}}(\vec{v})
\ \equiv \
(\vec{v} \cdot \hat{d}) \hat{d}
\ = \
\left(\vec{v} \cdot \frac{\vec{d}}{\|\vec{d}\|} \right) \frac{\vec{d}}{\|\vec{d}\|}.
$$
'''
'''
'''
def proj(v, d):
"""Computes the projection of vector `v` onto direction `d`."""
return v.dot( d/d.norm() )*( d/d.norm() )
'''
'''
v = Vector([2,2])
d = Vector([3,0])
proj_v_on_d = proj(v,d)
plot_vecs(d, v, proj_v_on_d)
autoscale_arrows()
'''
'''
'''
The basic projection operation can be used to compute projections onto planes, and to compute distances between geometric objects (page 192).
'''
'''
'''
'''
## Bases and coordinate projections
'''
'''
'''
'''
See [page 225](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=68) in v2.2 of the book:
- Different types of bases
- Orthonormal
- Orthogonal
- Generic
- Change of basis operation
'''
'''
'''
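# Added sketch of a change-of-basis computation (echoing the commented-out hint
# in the Basis section above): the coefficients of v=(4,6) with respect to the
# basis B' = {(1,1), (1,-1)} are obtained using the inverse of the matrix whose
# columns are the basis vectors.
B_prime = Matrix([[1,1],[1,-1]])   # columns are b1 and b2
B_prime.inv()*Vector([4,6])        # == (5,-1)_{B'}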
'''
## Vector spaces
'''
'''
'''
'''
See **page 231** in v2.2 of the book.
'''
'''
'''
'''
## Vector space techniques
'''
'''
'''
'''
See **page 244** in the book.
'''
'''
'''
'''
'''
'''
'''
'''
# 5/ Linear transformations
'''
'''
'''
'''
See [page 257](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=70) in v2.2 of the book.
'''
'''
'''
'''
## Vector functions
'''
'''
'''
'''
Functions that take vectors as inputs and produce vectors as outputs:
$$
T:\mathbb{R}^{n}\! \to \mathbb{R}^{m}
$$
'''
'''
'''
'''
## Matrix representation of linear transformations
'''
'''
'''
'''
$$
T : \mathbb{R}^n \to \mathbb{R}^m
\qquad
\Leftrightarrow
\qquad
M_T \in \mathbb{R}^{m \times n}
$$
'''
'''
'''
'''
'''
'''
'''
'''
## Input and output spaces
'''
'''
'''
'''
We can understand the properties of linear transformations $T$, and their matrix representations $M_T$ in analogy with ordinary functions:
\begin{align*}
\textrm{function }
f:\mathbb{R}\to \mathbb{R}
& \ \Leftrightarrow \,
\begin{array}{l}
\textrm{linear transformation }
T:\mathbb{R}^{n}\! \to \mathbb{R}^{m} \\
\textrm{represented by the matrix } M_T \in \mathbb{R}^{m \times n}
\end{array} \\
%
\textrm{input } x\in \mathbb{R}
& \ \Leftrightarrow \
\textrm{input } \vec{x} \in \mathbb{R}^n \\
%\textrm{compute }
\textrm{output } f(x) \in \mathbb{R}
& \ \Leftrightarrow \
% \textrm{compute matrix-vector product }
\textrm{output } T(\vec{x}) \equiv M_T\vec{x} \in \mathbb{R}^m \\
%\textrm{function composition }
g\circ\! f \: (x) = g(f(x))
& \ \Leftrightarrow \
% \textrm{matrix product }
S(T(\vec{x})) \equiv M_SM_T \vec{x} \\
\textrm{function inverse } f^{-1}
& \ \Leftrightarrow \
\textrm{matrix inverse } M_T^{-1} \\
\textrm{zeros of } f
& \ \Leftrightarrow \
\textrm{kernel of } T \equiv \textrm{null space of } M_T \equiv \mathcal{N}(A) \\
\textrm{image of } f
& \ \Leftrightarrow \
\begin{array}{l}
\textrm{image of } T \equiv \textrm{column space of } M_T \equiv \mathcal{C}(A)
\end{array}
\end{align*}
Observe that we refer to the linear transformation $T$ and its matrix representation $M_T$ interchangeably.
'''
'''
'''
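# Added demo: the kernel (null space) and image (column space) of a matrix
# representation, computed with SymPy. The matrix M is an illustrative example.
M = Matrix([[1,2,3],[2,4,6]])
M.nullspace(), M.columnspace()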
'''
## Finding matrix representations
'''
'''
'''
'''
See [page 269](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=74) in v2.2 of the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Invertible matrix theorem
See [page 288](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=78) in the book.
'''
'''
'''
'''
'''
'''
'''
'''
# 6/ Theoretical linear algebra
'''
'''
'''
'''
## Eigenvalues and eigenvectors
An eigenvector of the matrix $A$ is a special input vector, for which the matrix $A$ acts as a scaling:
$$
A\vec{e}_\lambda = \lambda\vec{e}_\lambda,
$$
where $\lambda$ is called the *eigenvalue* and $\vec{e}_\lambda$ is the corresponding eigenvector.
'''
'''
'''
A = Matrix([[1, 5],
[5, 1]])
A
'''
'''
A*Vector([1,0])
'''
'''
A*Vector([1,1])
'''
'''
'''
The *characteristic polynomial* of the matrix $A$ is defined as
$$
p(\lambda) \equiv \det(A-\lambda \mathbb{1}).
$$
'''
'''
'''
l = symbols('lambda')
(A-l*eye(2)).det()
'''
'''
# the roots of the characteristic polynomial are the eigenvalues of A
solve( (A-l*eye(2)).det(), l)
'''
'''
# or call `eigenvals` method
A.eigenvals()
'''
'''
A.eigenvects()
# can also find eigenvects using (A-6*eye(2)).nullspace() and (A+4*eye(2)).nullspace()
'''
'''
Q, Lambda = A.diagonalize()
Q, Lambda
'''
'''
Q*Lambda*Q.inv() # == eigendecomposition of A
'''
'''
'''
'''
'''
'''
'''
## Special types of matrices
'''
'''
'''
'''
See [page 312](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=83) in v2.2 of the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Abstract vector spaces
'''
'''
'''
'''
Generalizes vector techniques to other vector-like quantities, allowing us to talk about bases, dimension, etc.
See [page 318](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=84) in the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Abstract inner product spaces
'''
'''
'''
'''
Uses geometrical notions like length and orthogonality for abstract vectors.
See **page 322** in the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Gram–Schmidt orthogonalization
'''
'''
'''
'''
See **page 328**.
'''
'''
'''
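# Added sketch: SymPy's GramSchmidt turns a list of linearly independent vectors
# into an orthogonal (here orthonormal) set. The input vectors are made up.
vs = [Vector([1,1,0]), Vector([1,0,1]), Vector([0,1,1])]
GramSchmidt(vs, orthonormal=True)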
'''
'''
'''
'''
'''
## Matrix decompositions
'''
'''
'''
'''
See **page 332**.
'''
'''
'''
'''
'''
'''
'''
'''
## Linear algebra with complex numbers
'''
'''
'''
'''
See [page 339](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=88) in v2.2 of the book.
'''
'''
'''
'''
'''
'''
'''
'''
# Applications chapters
'''
'''
'''
'''
- Chapter 7: Applications
- Chapter 8: Probability theory
- Chapter 9: Quantum mechanics
'''
'''
'''
'''
'''
'''
'''
'''
# Notation appendix
'''
'''
'''
'''
Check out [page 571](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=142) in the book.
'''
'''
'''
'''
'''
'''
'''
|
mit
| -7,188,144,948,188,234,000
| 16.864536
| 343
| 0.468561
| false
| 3.318349
| false
| false
| false
|
OpenVolunteeringPlatform/django-ovp-projects
|
ovp_projects/migrations/0014_apply.py
|
1
|
1497
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-15 23:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ovp_projects', '0013_project_roles'),
]
operations = [
migrations.CreateModel(
name='Apply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=30, verbose_name='name')),
('date', models.DateTimeField(auto_now_add=True)),
('canceled', models.BooleanField(default=False, verbose_name='Canceled')),
('canceled_date', models.DateTimeField(blank=True, null=True, verbose_name='Canceled date')),
('email', models.CharField(blank=True, max_length=200, null=True, verbose_name='Email')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ovp_projects.Project')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'applies',
'verbose_name': 'apply',
},
),
]
|
agpl-3.0
| 8,839,063,111,466,270,000
| 41.771429
| 141
| 0.606546
| false
| 4.067935
| false
| false
| false
|
heromod/migrid
|
user-projects/EpistasisOnGrid/gridepistasisgui.py
|
1
|
20836
|
import sys
sys.path.append("GUI/")
import epistasisviewer as viewer
import gridepistasis as epistasisControl
import wx
#import time
import os
sys.path.append("RfilesAndscripts/")
import readdata
from threading import Thread
import Configuration.epistasisconfiguration as config
exec_state = "executing"
pending_state = "pending"
finished_state = "finished"
cancelled_state = "cancelled"
class gridepistasisgui:
def __init__(self):
self.gene_selection = set()
self.gene_selection_dict = {}
self.trait_selection = set()
self.trait_selection_dict = {}
self.class_selection = list()
self.all_genes_and_traits = list()
self.data_sheet = []
self.jobs = []
self.epistasis_status = pending_state
self.epistasis_thread = Thread()
def popup_box(self,comment, caption_title=" "):
dlg = wx.MessageDialog(frame_1,
message=comment,
caption=caption_title,
style=wx.OK|wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
def yesno_box(self,comment, caption_title=" "):
dlg = wx.MessageDialog(frame_1,
message=comment,
caption=caption_title,
style=wx.YES_DEFAULT|wx.ICON_INFORMATION
)
choice = dlg.ShowModal()
print choice
dlg.Destroy()
#def load_selection_var_list(self, selection_variables):
#sel_var = frame_1.selection_variable_list.GetSelection()
#value_list = self.data_sheet[sel_var]
#values = list(set(value_list))
## all values are either float or string
#if type(values[0]) == float:
#values = filter(lambda x : str(x) not in ["nan","-99.0"], values)
#elif type(values[0]) == str:
#values = filter(lambda x : x.strip() not in ["?"], values)
#values.sort()
#def clean(v):
#if type(v) == type(1.0) and v % 1 == 0.0:
#v = int(v)
#str_v = str(v).strip()
#return str_v
#values = map(clean,values)
#frame_1.start_class.SetItems(values)
#frame_1.start_class.SetSelection(0)
#frame_1.end_class.SetItems(values)
#frame_1.end_class.SetSelection(len(values)-1)
#while(self.class_selection != []):
#self.class_selection.pop(0)
#self.class_selection.extend(values)
############################
######## GENE SELECTOR TAB###########
#############################
def read_data_sheet(self):
datafile = frame_1.datafile.GetValue()
if not os.path.exists(datafile):
self.popup_box("Can't find "+datafile, "Can't find "+datafile)
return
#print all_genes_and_traits
#data_sheet.update(readdata.read_data(datafile))
data_list, column_labels = readdata.read_data(datafile)
self.data_sheet.extend(data_list)
#column_labels = data_sheet.keys()
self.all_genes_and_traits.extend(column_labels)
#print "all", all_genes_and_traits
#all_genes_and_traits.sort()
frame_1.gene_list.Set(self.all_genes_and_traits)
frame_1.trait_list.Set(self.all_genes_and_traits)
# assume that the selection variable is in first columns
frame_1.selection_variable_list.SetItems(self.all_genes_and_traits[0:20])
#frame_1.selection_variable_list.Select(1)
##### BUTTONS ############
def update_selected_genes(self):
frame_1.selected_genes.Set(list(self.gene_selection))
def on_add_genes(self,event=None):
indexes = frame_1.gene_list.GetSelections()
#indexes = list(index)
#print indexes, all_genes_and_traits
#frame_1.selected_genes.InsertItems(index, 0)
for i in indexes:
# gene_name = all_genes_and_traits[i]
#if not gene_name in gene_selection:
#gene_selection.append(all_genes_and_traits[i])
gene_name = self.all_genes_and_traits[i]
self.gene_selection.add(gene_name)
if not self.gene_selection_dict.has_key(gene_name):
self.gene_selection_dict[gene_name] = i
self.update_selected_genes()
def on_remove_genes(self,event=None):
indexes = list(frame_1.selected_genes.GetSelections())
indexes.reverse()
#print indexes
for i in indexes:
gene_name = list(self.gene_selection)[i] # list converts from set to list
self.gene_selection.remove(gene_name)
del(self.gene_selection_dict[gene_name])
#gene_selection.remove(genes)
self.update_selected_genes()
###########################
######## TRAIT SELECTOR TAB###########
##########################
##### BUTTONS ############
def update_selected_traits(self):
frame_1.selected_traits.Set(list(self.trait_selection))
def on_add_traits(self,event=None):
indexes = frame_1.trait_list.GetSelections()
#indexes = list(index)
#frame_1.selected_genes.InsertItems(index, 0)
for i in indexes:
trait_name = self.all_genes_and_traits[i]
#if not gene_name in gene_selection:
#gene_selection.append(all_genes_and_traits[i])
self.trait_selection.add(trait_name)
if not self.trait_selection_dict.has_key(trait_name):
self.trait_selection_dict[trait_name] = i
self.update_selected_traits()
def on_remove_traits(self,event=None):
indexes = list(frame_1.selected_traits.GetSelections())
indexes.reverse()
for i in indexes:
trait_name = list(self.trait_selection)[i] # list converts set
self.trait_selection.remove(trait_name)
del(self.trait_selection_dict[trait_name])
#gene_selection.remove(genes)
self.update_selected_traits()
##########################
#### GENERAL TAB ############
#########################
def validateInput(self):
try:
g1 = int(frame_1.g1.GetValue())
g2 = int(frame_1.g2.GetValue())
t1 = int(frame_1.t1.GetValue())
t2 = int(frame_1.t2.GetValue())
# sv = int(frame_1.sv.GetValue())
#c1 = int(frame_1.c1.GetValue())
#c2 = int(frame_1.c2.GetValue())
except ValueError:
return False, "Index values must be integers"
#if type(g1) != type(1) and type(g2) != type(1):
# return False, "Genes indexes must be integers"
#if type(t1) != type(1) and type(t2) != type(1):
# return False, "Trait indexes must be integers"
#if type(sv) != type(1) :
# return False, "Selection variable index must be an integer"
datafile = frame_1.datafile.GetValue()
outputdir = frame_1.outputdir.GetValue()
#type(sv) != type(1)
if not os.path.exists(datafile):
return False, "Can't find data file : "+datafile
if not os.path.exists(outputdir):
return False, "Can't find output directory : "+outputdir
if frame_1.selection_variable_list.GetSelection() == -1:
return False, "Choose a selection variable."
return True, "OK"
##### START/ STOP #############
def start(self):
#collect values
datafile = frame_1.datafile.GetValue()
outputdir = frame_1.outputdir.GetValue()
local_mode = frame_1.runlocal.GetValue()
print local_mode
#selected_genes = list(frame_1.selected_genes.GetSelections())
if outputdir[-1] != "/":
outputdir += "/"
if frame_1.use_indexes.GetValue():
g1 = int(frame_1.g1.GetValue())
g2 = int(frame_1.g2.GetValue())
t1 = int(frame_1.t1.GetValue())
t2 = int(frame_1.t2.GetValue())
#sv = frame_1.sv.GetValue()
#c1 = frame_1.c1.GetValue()
#c2 = frame_1.c2.GetValue()
genes = range(g1,g2+1)
traits = range(t1,t2+1)
#, traits = readdata.get_by_index(datafile,g1,g2,t1,t2)
else:
genes = self.gene_selection_dict.values()
traits = self.trait_selection_dict.values()
list_pos = frame_1.selection_variable_list.GetSelection()+1 # indexes start at 1 in R
selection_variable = list_pos
i = frame_1.start_class.GetSelection()
j = frame_1.end_class.GetSelection()
selection_variable_values= self.class_selection[i:j+1]
self.epistasis_thread = My_epistasis_thread(genelist=genes, traitlist=traits, selection_variable=selection_variable, selection_variable_values=selection_variable_values, data=datafile, output_dir=outputdir, local_mode=local_mode)
# genelist,traitlist,selection_variable, selection_variable_values,local_mode,data,output_dir
frame_1.statusfeed.write("Creating %i jobs..." % len(selection_variable_values))
self.epistasis_thread.start()
#jobs = model.start_epistasis(c1,c2,g1,g2,t1,t2,sv,datafile,outputdir,local_mode)
#self.jobs = self.epistasis_thread.start_epistasis(genelist=genes,traitlist=traits,selection_variable=selection_variable, selection_variable_values=selection_variable_values,local_mode=local_mode,data=datafile,output_dir=outputdir)
#model.epistasis_status=exec_state
self.epistasis_status = exec_state
frame_1.timer.Start(milliseconds=2000) # start the timer for 2 sec
def stop(self):
self.epistasis_thread.stop()
#model.clean_up_epistasis()
#model.__init__()
self.EnableControls(True)
#frame_1.timer.Stop()
#self.update_gui()
#self.epistasis_thread.join()
def finish(self):
self.epistasis_thread.join()
def post_commands(self):
post_exec_str = frame_1.post_exec_cmds.GetValue()
post_exec_commands = post_exec_str.split(";\n")
for cmd in post_exec_commands:
try:
proc = os.popen(cmd, "w")
proc.close()
except OSError:
print "Unable to execute command :"+cmd
def final(self):
#model.clean_up_epistasis()
self.post_commands()
def EnableControls(self,enable):
frame_1.datafile.Enable(enable)
#frame_1.g1.Enable(enable)
#frame_1.g2.Enable(enable)
#frame_1.t1.Enable(enable)
#frame_1.t2.Enable(enable)
#frame_1.sv.Enable(enable)
frame_1.datafile.Enable(enable)
frame_1.outputdir.Enable(enable)
frame_1.button_1.Enable(enable)
frame_1.button_2.Enable(enable)
frame_1.Start.Enable(enable)
frame_1.Stop.Enable(enable)
frame_1.runlocal.Enable(enable)
frame_1.use_indexes.Enable(enable)
#frame_1.c1.Enable(enable)
#frame_1.c2.Enable(enable)
def update_gui(self):
if self.epistasis_status == pending_state: # if the grid jobs haven't been started, do nothing
return
running_jobs = self.epistasis_thread.jobs
finished_jobs = self.epistasis_thread.finished_jobs
all_jobs = []
all_jobs.extend(running_jobs)
all_jobs.extend(finished_jobs)
if all_jobs == []: # jobs not ready yet
return
if len(all_jobs) > 0 and len(all_jobs) == len(finished_jobs) :
self.epistasis_status = finished_state
progress_str = str(len(finished_jobs)) + '/'\
+ str(len(all_jobs))
status_lines = self.create_gui_job_text(all_jobs)
status = ""
for line in status_lines:
status += line + '\n'
frame_1.statusfeed.Clear()
frame_1.statusfeed.write(status)
frame_1.progress.Clear()
frame_1.progress.write(progress_str)
def create_gui_job_text(self,jobs):
"""Return a status string for each job"""
lines = []
for j in jobs:
line = 'Grid Epistasis Job \t %(class)s \t %(status)s \t %(started)s' % j
lines.append(line)
return lines
##### BUTTONS ############
# event handlers
def OnBtnStart(self,event=None):
valid, comment = self.validateInput()
if not valid:
self.popup_box(comment, "Incorrect input")
return
#model.epistasis_status = pending_state
self.epistasis_status = pending_state
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("Starting epistasis...")
self.EnableControls(False)
frame_1.Stop.Enable(True)
TIMER_ID = 100 # pick a number
shorttime= 100
frame_1.timer = wx.Timer(frame_1, TIMER_ID) # message will be sent to the panel
frame_1.timer.Start(shorttime)
def OnBtnStop(self,event=None):
if self.epistasis_status == exec_state:
self.epistasis_status = cancelled_state
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("Stopping epistasis...")
def OnBtnBrowseFile(self,event=None):
path = os.curdir
fd = wx.FileDialog(frame_1, message="Choose file")
fd.ShowModal()
fd.Destroy()
frame_1.datafile.Clear()
frame_1.datafile.write(fd.GetPath())
#self.read_data_sheet()
def on_load_button(self, event=None):
self.read_data_sheet()
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("File loaded.")
epi_gui.EnableControls(True)
def OnBtnBrowseDir(self,event=None):
path = frame_1.outputdir.GetValue()
if path == "":
path = os.curdir
dd = wx.DirDialog(frame_1, message="Choose dir", defaultPath=path)
dd.ShowModal()
dd.Destroy()
frame_1.outputdir.Clear()
frame_1.outputdir.write(dd.GetPath())
def OnMenuQuit(self,event=None):
if self.epistasis_thread.is_alive():
self.epistasis_thread.stop()
self.epistasis_thread.join()
frame_1.Destroy()
def on_use_indexes(self,event=None):
value = frame_1.use_indexes.GetValue()
frame_1.gene_index1_label.Enable(value)
frame_1.gene_index2_label.Enable(value)
frame_1.trait_index1_label.Enable(value)
frame_1.trait_index2_label.Enable(value)
frame_1.g1.Enable(value)
frame_1.g2.Enable(value)
frame_1.t1.Enable(value)
frame_1.t2.Enable(value)
def on_choice(self,event=None):
sel_var = frame_1.selection_variable_list.GetSelection()
value_list = self.data_sheet[sel_var]
values = list(set(value_list))
# all values are either float or string
if type(values[0]) == float:
values = filter(lambda x : str(x) not in ["nan","-99.0"], values)
elif type(values[0]) == str:
values = filter(lambda x : x.strip() not in ["?"], values)
values.sort()
def clean(v):
if type(v) == type(1.0) and v % 1 == 0.0:
v = int(v)
str_v = str(v).strip()
return str_v
values = map(clean,values)
frame_1.start_class.SetItems(values)
frame_1.start_class.SetSelection(0)
frame_1.end_class.SetItems(values)
frame_1.end_class.SetSelection(len(values)-1)
while(self.class_selection != []):
self.class_selection.pop(0)
self.class_selection.extend(values)
def bindViewerEvents(self):
frame_1.button_1.Bind(wx.EVT_BUTTON, self.OnBtnBrowseFile)
frame_1.load_data_button.Bind(wx.EVT_BUTTON, self.on_load_button)
frame_1.button_2.Bind(wx.EVT_BUTTON, self.OnBtnBrowseDir)
frame_1.Start.Bind(wx.EVT_BUTTON, self.OnBtnStart)
frame_1.Stop.Bind(wx.EVT_BUTTON, self.OnBtnStop)
frame_1.add_genes.Bind(wx.EVT_BUTTON,self.on_add_genes)
frame_1.remove_genes.Bind(wx.EVT_BUTTON, self.on_remove_genes)
frame_1.add_traits.Bind(wx.EVT_BUTTON,self.on_add_traits)
frame_1.remove_traits.Bind(wx.EVT_BUTTON, self.on_remove_traits)
#frame_1.notebook_1_pane_2.Bind(wx.EVT_BUTTON,on_gene_selector_tab)
#frame_1.notebook_1_pane_2.Bind(wx.EVT_KEY_DOWN,on_gene_selector_tab)
frame_1.use_indexes.Bind(wx.EVT_CHECKBOX, self.on_use_indexes)
frame_1.selection_variable_list.Bind(wx.EVT_CHOICE,self.on_choice)
frame_1.Bind(wx.EVT_MENU, self.OnMenuQuit)
frame_1.Bind(wx.EVT_CLOSE, self.OnMenuQuit)
frame_1.Bind(wx.EVT_TIMER, self.OnTimer)
def OnTimer(self,event=None):
#print "timer event. status "+self.epistasis_status, exec_state, self.epistasis_status == exec_state
self.update_gui()
#print "restarting timer"
frame_1.timer.Start(milliseconds=config.gui_update_timer)
if self.epistasis_status == pending_state:
self.start()
#elif self.epistasis_status == exec_state:
#print "restarting timer"
#frame_1.timer.Start(milliseconds=config.gui_update_timer)
elif self.epistasis_status == finished_state:
frame_1.timer.Stop()
self.finish()
self.popup_box('Result files are in your output directory.', 'Epistasis complete')
self.final()
self.EnableControls(True)
elif self.epistasis_status == cancelled_state:
self.stop()
frame_1.timer.Stop()
self.final()
self.update_gui()
class My_epistasis_thread(Thread):
def __init__(self, genelist, traitlist, selection_variable, selection_variable_values, data, output_dir, local_mode):
Thread.__init__(self)
self.genelist = genelist
self.traitlist = traitlist
self.selection_variable = selection_variable
self.selection_variable_values = selection_variable_values
self.data = data
self.output_dir = output_dir
self.status = ""
self.progress = ""
self.cancel_jobs = False
self.jobs = []
self.finished_jobs = []
self.local_mode = local_mode
def run(self):
import time
self.jobs = epistasisControl.start_epistasis(self.selection_variable_values, self.genelist,self.traitlist, self.selection_variable, self.data, self.output_dir, local_mode=self.local_mode)
total_jobs = len(self.jobs)
time.sleep(5)
while True:
print "Updating"
self.jobs = epistasisControl.update_epistasis(self.jobs)
for j in self.jobs:
if j["status"] == "FINISHED":
epistasisControl.download_output(j)
self.jobs.remove(j)
self.finished_jobs.append(j)
if self.cancel_jobs: # Stop
epistasisControl.stop_epistasis(self.jobs)
self.jobs = epistasisControl.update_epistasis(self.jobs)
#self.update_epistasis()
break
if total_jobs == len(self.finished_jobs): # we're finished
break
time.sleep(config.polling_update_timer)
print "Thread exiting"
def stop(self):
self.cancel_jobs = True
# frame_1.runlocal.Bind(wx.EVT_CHECKBOX, OnLocal)
if __name__ == '__main__':
epi_gui = gridepistasisgui()
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = viewer.MyEpiFrame(None, -1, "")
# disable all controls except the file browser
epi_gui.EnableControls(False)
frame_1.datafile.Enable(True)
frame_1.button_1.Enable(True)
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("Load a data file to get started.")
#model = EpiModel.GridEpistasis()
#read_genes()
#epi_gui.read_data_sheet()
app.SetTopWindow(frame_1)
epi_gui.bindViewerEvents()
frame_1.Show()
app.MainLoop()
|
gpl-2.0
| 93,554,748,704,978,140
| 35.618629
| 239
| 0.566376
| false
| 3.679322
| true
| false
| false
|
gandalf221553/CodeSection
|
compilare/compilatore.py
|
1
|
6485
|
def RowChanger(row,textToSearch,textToReplace,fileToSearch):
a=1
import fileinput
tempFile = open( fileToSearch, 'r+' )
for line in fileinput.input( fileToSearch ):
if row in line :
print('done yet')
a=0
if a:
if textToReplace=="0":
textToReplace = textToSearch+"\n"+row
#fileToSearch = 'D:\dummy1.txt'
tempFile = open( fileToSearch, 'r+' )
for line in fileinput.input( fileToSearch ):
if textToSearch in line :
print('done now')
tempFile.write(line.replace(textToSearch,textToReplace))
tempFile.close()
#http://pythoncentral.io/pyinstaller-package-python-applications-windows-mac-linux/
def ModSpec():
print("modddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
import os
print(os.path.basename(os.path.dirname(os.path.realpath(__file__))))
#nome=os.path.basename(os.path.dirname(os.path.realpath(__file__)))
nome="kivy_matplotlib"
icon=1
onefile=0
executive=0
vuoimettereunimmagine=0
altrecartelle=0
versionfile=0
nomepy=nome+".py"
nomespec=nome+".spec"
nomecart="\\"+nome+"\\"
nomeIcon="icon.ico"
import platform
#windowsonly="" if StringCnf(platform.system(),"Windows") else windowsonly=" -m "
from calcolatrice.misuras import StringCnf
if StringCnf(platform.system(),"Windows"):
windowsonly=" -m "
else:
windowsonly=""
if onefile:
vuoifareunfile=" --onefile"
else:
vuoifareunfile=""
if vuoimettereunimmagine:
nomeimmagine="logo.png"
else:
nomeimmagine=""
if icon:
iconStr=" --icon "+nomeIcon+" "
else:
iconStr=""
#compilatore
a=""#"\\"+os.getcwd()
posizione=a+nomepy
if versionfile:
versionfile=" --version-file=version.txt "
else:
versionfile=""
pythonpath="!python "#"C:\\Users\\Von Braun\\Downloads\\WinPython-64bit-3.5.2.3Qt5\\python-3.5.2.amd64\\Scripts\\pyinstaller.exe "
#pythonpath="path='"+a+"'"
#pythonpath= "C:\Users\Von Braun\Downloads\WinPython-64bit-3.5.2.3Qt5\python-3.5.2.amd64\python.exe "
pyinstallerpath="PyInstaller "
#pyinstallerpath="C:\Users\Von Braun\Downloads\WinPython-64bit-3.5.2.3Qt5\python-3.5.2.amd64\Lib\site-packages\PyInstaller\building\makespec.py "
#http://stackoverflow.com/questions/8663046/how-to-install-a-python-package-from-within-ipython
#%%!python -m PyInstaller --onefile --name nome --icon icon.ico kivy_matplotlib.py
print("\n\n ATTENDI.....POTRESTI DOVER ASPETTARE MOLTO TEMPO\n\n")
creaspecfile=pythonpath+windowsonly+pyinstallerpath+posizione+vuoifareunfile+" --windowed "+" --name "+nome+iconStr+versionfile
print(creaspecfile)
print("\n\n")
if executive and 0:
#from IPython import get_ipython
#ipython = get_ipython()
#ipython.magic(exec(creaspecfile))
#run(creaspecfile)
#exec(input("inserisci la frase di sopra\n\n"))
import PyInstaller.__main__
specpath="--specpath " +os.getcwd() #/opt/bk/spec
distpath="--distpath " +os.getcwd()+"\\dist" #/opt/bk/dist
workpath="--workpath " +os.getcwd()+"\\build" #/opt/bk/build
print(specpath)
print(distpath)
print(workpath)
#import PyInstaller.utils.cliutils.makespec
#'C:\\Users\\Von Braun\\Google Drive\\mat2pylab\\ProgettoTesi3.86\\hello'
#'C:\\Users\\Von Braun\\Downloads\\WinPython-64bit-3.5.2.3Qt5\\settings'
#pathex=['C:\\Users\\Von Braun\\Downloads\\WinPython-64bit-3.5.2.3Qt5\\python-3.5.2.amd64\\Lib\\site-packages\\PyInstaller']
#PyInstaller.__main__.run_makespec([nomepy,pathex])
PyInstaller.__main__.run(["-y", "-w",nomepy])
#exec(creaspecfile)
if 1:
import os.path
esistelospec=os.path.isfile(nomespec)
if esistelospec==0:
from sys import exit
exit()
print("\ncreazione dello spec completata")
# add these lines to the spec file
#http://stackoverflow.com/questions/17140886/how-to-search-and-replace-text-in-a-file-using-python
print("modifica dello spec in corso\n\n")
import fileinput
riga="from kivy.deps import sdl2, glew"
textToSearch = "# -*- mode: python -*-"
NomeFile = nome+".spec"
#fileToSearch = 'D:\dummy1.txt'
RowChanger(riga,textToSearch,"0",NomeFile)
if altrecartelle:
nuova="Tree('.."+nomecart+"'),"
textToSearch="coll = COLLECT(exe,"
textSub=textToSearch+nuova
RowChanger(nuova,textToSearch,textSub,NomeFile)
#if icona:
# modIcon=" "+"icon='icon.ico',"
# cerca="exe = EXE(pyz,"
# Modificatore(modIcon,cerca,"0",NomeFile)
cerca2="a.datas,"
modText2=" "+"*[Tree(p) for p in (sdl2.dep_bins + glew.dep_bins)],"
RowChanger(modText2,cerca2,"0",NomeFile)
print("spec file completed")
print("modddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
#coll = COLLECT(exe, Tree('examples-path\\demo\\touchtracer\\'),
#--onefile
print("\n\nsta per iniziare la compilazione, attendi fino a che non avrà finito, troverai il file exe nella cartella DIST\n")
compilaspecfile=pythonpath+windowsonly+pyinstallerpath+nomespec
print(compilaspecfile)
if executive:
#ipython = get_ipython()
#exec(input("inserisci la frase di sopra\n\n"))
import PyInstaller.__main__
PyInstaller.__main__.run(["-y", "-w","kivy_matplotlib.py"])
#run(exec(creaspecfile))
print("\ncompilation complete")
"""
if args.filenames[0].endswith('.spec'):
spec_file = args.filenames[0]
else:
spec_file = run_makespec(**vars(args))
##############################################################################################
print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
a=os.getcwd()
print(a)
#os.chdir("C:\\Users\\Von Braun\\Google Drive\\mat2pylab\\ProgettoTesi4.00")
print(spec_file)
from compilatore import ModSpec
ModSpec()
os.chdir(a)
print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
##############################################################################################
run_build(pyi_config, spec_file, **vars(args))
"""
|
mit
| 6,083,077,644,254,006,000
| 38.066265
| 149
| 0.6095
| false
| 3.274747
| false
| false
| false
|
broadinstitute/herc
|
herc/async.py
|
1
|
2127
|
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from tornado import ioloop
from tornado.concurrent import run_on_executor
from tornado import gen
from functools import wraps
import threading
executors = {
'short': ThreadPoolExecutor(max_workers=8), # For little things to avoid blocking the main thread
'long': ThreadPoolExecutor(max_workers=4), # For longer work, like file I/O
'aurora': ThreadPoolExecutor(max_workers=4), # Exclusively for communicating with Aurora
'docker': ThreadPoolExecutor(max_workers=4) # Exclusively for communicating with Docker
}
def usepool(executor):
"""
Decorator that runs the decorated function asynchronously in the given executor pool whenever it's run.
Anything calling a function decorated with this decorator must be a gen.coroutine.
"""
def dec(func):
@wraps(func)
@gen.coroutine
def inner(*args, **kwargs):
t = Task(executor)
ret = yield t.run(func, *args, **kwargs)
raise gen.Return(ret)
return inner
return dec
class Task:
"""
Class that turns any function into an asynchronous call.
Usage: t = Task( 'executorname' )
result = yield t.run( fn, *args, **kwargs )
Caller must be a gen.coroutine.
"""
def __init__(self, executor):
self.executor = executors[executor]
self.io_loop = ioloop.IOLoop.instance()
@run_on_executor
def run(self, fn, *args, **kwargs):
return fn(*args, **kwargs)
class ThreadedDict(object):
"""A dict of values keyed by thread id."""
def __init__(self, ctor_func):
self.ctor_func = ctor_func
self._thrdict = dict()
def get(self):
"""Get the ID of the current thread. If there exists a value in the dict for that thread, return it.
Otherwise, construct one and return that."""
thrid = threading.get_ident()
try:
return self._thrdict[thrid]
except KeyError:
#No value! Create it.
self._thrdict[thrid] = self.ctor_func()
return self._thrdict[thrid]
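# Usage sketch (illustrative comment, not part of the original module): a function
# decorated with @usepool runs on the named executor pool and must be yielded
# from a tornado coroutine, e.g.
#
#   @usepool('short')
#   def add(a, b):
#       return a + b
#
#   @gen.coroutine
#   def caller():
#       result = yield add(1, 2)   # result == 3
#       raise gen.Return(result)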
|
bsd-3-clause
| 4,405,503,052,446,012,000
| 31.723077
| 108
| 0.649741
| false
| 3.968284
| false
| false
| false
|
ch1huizong/dj
|
bookmarks/bookmarks/settings.py
|
1
|
4531
|
"""
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
from django.urls import reverse_lazy
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-u2yf301#o_1%qs#fytmr$d1$*dzpxo7x-k^is!lyfyix4czs1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'dj.com',
'localhost',
'127.0.0.1',
'e5bd050d.ngrok.io',
]
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'images.apps.ImagesConfig',
'actions.apps.ActionsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'sorl.thumbnail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'bookmarks',
'USER': 'postgres',
'PASSWORD': 'quiet',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
# email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# auth
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.google.GoogleOAuth2',
]
SOCIAL_AUTH_FACEBOOK_KEY = '1888178554562550'
SOCIAL_AUTH_FACEBOOK_SECRET = '9d6b83ebce4890e0890fd1269b7d68a8'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email',]
SOCIAL_AUTH_TWITTER_KEY = ''
SOCIAL_AUTH_TWITTER_SECRET = ''
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '329800332506-nm2hfiuan4usgpit6dohf6u7djeugoqh.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'Th6sThj4Xp-NWQ4t0ANncAcS'
# ssl
#SECURE_SSL_REDIRECT = True
# thumbnail
#THUMBNAIL_DEBUG = True
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: reverse_lazy('user_detail', args=[u.username]),
}
# redis
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
|
unlicense
| -7,262,526,610,636,818,000
| 24.59887
| 106
| 0.696314
| false
| 3.248029
| false
| false
| false
|
mitocw/edx-platform
|
lms/djangoapps/course_api/views.py
|
1
|
14554
|
"""
Course API Views
"""
from django.core.exceptions import ValidationError
from django.core.paginator import InvalidPage
from edx_rest_framework_extensions.paginators import NamespacedPageNumberPagination
from django.http import HttpResponseRedirect
from django.urls import reverse
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.throttling import UserRateThrottle
from rest_framework.exceptions import NotFound
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from . import USE_RATE_LIMIT_2_FOR_COURSE_LIST_API, USE_RATE_LIMIT_10_FOR_COURSE_LIST_API
from .api import course_detail, list_course_keys, list_courses
from .forms import CourseDetailGetForm, CourseIdListGetForm, CourseListGetForm
from .serializers import CourseDetailSerializer, CourseKeySerializer, CourseSerializer
# MIT-OLL : course Id for temporary redirection of a course
BIOLOGY_COURSE_ID = 'course-v1:OCW+Pre-7.01+1T2020'
@view_auth_classes(is_authenticated=False)
class CourseDetailView(DeveloperErrorViewMixin, RetrieveAPIView):
"""
**Use Cases**
Request details for a course
**Example Requests**
GET /api/courses/v1/courses/{course_key}/
**Response Values**
Body consists of the following fields:
* effort: A textual description of the weekly hours of effort expected
in the course.
* end: Date the course ends, in ISO 8601 notation
* enrollment_end: Date enrollment ends, in ISO 8601 notation
* enrollment_start: Date enrollment begins, in ISO 8601 notation
* id: A unique identifier of the course; a serialized representation
of the opaque key identifying the course.
* media: An object that contains named media items. Included here:
* course_image: An image to show for the course. Represented
as an object with the following fields:
* uri: The location of the image
* name: Name of the course
* number: Catalog number of the course
* org: Name of the organization that owns the course
* overview: A possibly verbose HTML textual description of the course.
Note: this field is only included in the Course Detail view, not
the Course List view.
* short_description: A textual description of the course
* start: Date the course begins, in ISO 8601 notation
* start_display: Readably formatted start of the course
* start_type: Hint describing how `start_display` is set. One of:
* `"string"`: manually set by the course author
* `"timestamp"`: generated from the `start` timestamp
* `"empty"`: no start date is specified
* pacing: Course pacing. Possible values: instructor, self
Deprecated fields:
* blocks_url: Used to fetch the course blocks
* course_id: Course key (use 'id' instead)
**Parameters:**
username (optional):
The username of the specified user for whom the course data
is being accessed. The username is not required only if the API is
requested by an Anonymous user.
**Returns**
* 200 on success with above fields.
* 400 if an invalid parameter was sent or the username was not provided
for an authenticated request.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the course is not available or cannot be seen.
Example response:
{
"blocks_url": "/api/courses/v1/blocks/?course_id=edX%2Fexample%2F2012_Fall",
"media": {
"course_image": {
"uri": "/c4x/edX/example/asset/just_a_test.jpg",
"name": "Course Image"
}
},
"description": "An example course.",
"end": "2015-09-19T18:00:00Z",
"enrollment_end": "2015-07-15T00:00:00Z",
"enrollment_start": "2015-06-15T00:00:00Z",
"course_id": "edX/example/2012_Fall",
"name": "Example Course",
"number": "example",
"org": "edX",
"overview: "<p>A verbose description of the course.</p>"
"start": "2015-07-17T12:00:00Z",
"start_display": "July 17, 2015",
"start_type": "timestamp",
"pacing": "instructor"
}
"""
serializer_class = CourseDetailSerializer
def get_object(self):
"""
Return the requested course object, if the user has appropriate
permissions.
"""
requested_params = self.request.query_params.copy()
requested_params.update({'course_key': self.kwargs['course_key_string']})
form = CourseDetailGetForm(requested_params, initial={'requesting_user': self.request.user})
if not form.is_valid():
raise ValidationError(form.errors)
return course_detail(
self.request,
form.cleaned_data['username'],
form.cleaned_data['course_key'],
)
class CourseListUserThrottle(UserRateThrottle):
"""Limit the number of requests users can make to the course list API."""
# The course list endpoint is likely being inefficient with how it's querying
# various parts of the code and can take courseware down; it needs to be rate
# limited until optimized. LEARNER-5527
THROTTLE_RATES = {
'user': '20/minute',
'staff': '40/minute',
}
def check_for_switches(self):
if USE_RATE_LIMIT_2_FOR_COURSE_LIST_API.is_enabled():
self.THROTTLE_RATES = {
'user': '2/minute',
'staff': '10/minute',
}
elif USE_RATE_LIMIT_10_FOR_COURSE_LIST_API.is_enabled():
self.THROTTLE_RATES = {
'user': '10/minute',
'staff': '20/minute',
}
def allow_request(self, request, view):
self.check_for_switches()
# Use a special scope for staff to allow for a separate throttle rate
user = request.user
if user.is_authenticated and (user.is_staff or user.is_superuser):
self.scope = 'staff'
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
return super(CourseListUserThrottle, self).allow_request(request, view)
class LazyPageNumberPagination(NamespacedPageNumberPagination):
"""
NamespacedPageNumberPagination that works with a LazySequence queryset.
The paginator cache uses ``@cached_property`` to cache the property values for
count and num_pages. It assumes these won't change, but in the case of a
LazySequence, its count gets updated as we move through it. This class clears
the cached property values before reporting results so they will be recalculated.
"""
def get_paginated_response(self, data):
# Clear the cached property values to recalculate the estimated count from the LazySequence
del self.page.paginator.__dict__['count']
del self.page.paginator.__dict__['num_pages']
# Paginate queryset function is using cached number of pages and sometime after
# deleting from cache when we recalculate number of pages are different and it raises
# EmptyPage error while accessing the previous page link. So we are catching that exception
# and raising 404. For more detail checkout PROD-1222
page_number = self.request.query_params.get(self.page_query_param, 1)
try:
self.page.paginator.validate_number(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number, message=str(exc)
)
self.page.number = self.page.paginator.num_pages
raise NotFound(msg)
return super(LazyPageNumberPagination, self).get_paginated_response(data)
@view_auth_classes(is_authenticated=False)
class CourseListView(DeveloperErrorViewMixin, ListAPIView):
"""
**Use Cases**
Request information on all courses visible to the specified user.
**Example Requests**
GET /api/courses/v1/courses/
**Response Values**
Body comprises a list of objects as returned by `CourseDetailView`.
**Parameters**
search_term (optional):
Search term to filter courses (used by ElasticSearch).
username (optional):
The username of the specified user whose visible courses we
                want to see. The username is required unless the API is
                requested by an anonymous user.
org (optional):
If specified, visible `CourseOverview` objects are filtered
such that only those belonging to the organization with the
provided org code (e.g., "HarvardX") are returned.
Case-insensitive.
**Returns**
* 200 on success, with a list of course discovery objects as returned
by `CourseDetailView`.
* 400 if an invalid parameter was sent or the username was not provided
for an authenticated request.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the specified user does not exist, or the requesting user does
not have permission to view their courses.
Example response:
[
{
"blocks_url": "/api/courses/v1/blocks/?course_id=edX%2Fexample%2F2012_Fall",
"media": {
"course_image": {
"uri": "/c4x/edX/example/asset/just_a_test.jpg",
"name": "Course Image"
}
},
"description": "An example course.",
"end": "2015-09-19T18:00:00Z",
"enrollment_end": "2015-07-15T00:00:00Z",
"enrollment_start": "2015-06-15T00:00:00Z",
"course_id": "edX/example/2012_Fall",
"name": "Example Course",
"number": "example",
"org": "edX",
"start": "2015-07-17T12:00:00Z",
"start_display": "July 17, 2015",
"start_type": "timestamp"
}
]
"""
class CourseListPageNumberPagination(LazyPageNumberPagination):
max_page_size = 100
pagination_class = CourseListPageNumberPagination
serializer_class = CourseSerializer
throttle_classes = (CourseListUserThrottle,)
def get_queryset(self):
"""
Yield courses visible to the user.
"""
form = CourseListGetForm(self.request.query_params, initial={'requesting_user': self.request.user})
if not form.is_valid():
raise ValidationError(form.errors)
return list_courses(
self.request,
form.cleaned_data['username'],
org=form.cleaned_data['org'],
filter_=form.cleaned_data['filter_'],
search_term=form.cleaned_data['search_term']
)
class CourseIdListUserThrottle(UserRateThrottle):
"""Limit the number of requests users can make to the course list id API."""
THROTTLE_RATES = {
'user': '20/minute',
'staff': '40/minute',
}
def allow_request(self, request, view):
# Use a special scope for staff to allow for a separate throttle rate
user = request.user
if user.is_authenticated and (user.is_staff or user.is_superuser):
self.scope = 'staff'
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
return super(CourseIdListUserThrottle, self).allow_request(request, view)
@view_auth_classes()
class CourseIdListView(DeveloperErrorViewMixin, ListAPIView):
"""
**Use Cases**
Request a list of course IDs for all courses the specified user can
access based on the provided parameters.
**Example Requests**
GET /api/courses/v1/courses_ids/
**Response Values**
Body comprises a list of course ids and pagination details.
**Parameters**
username (optional):
The username of the specified user whose visible courses we
want to see.
role (required):
Course ids are filtered such that only those for which the
user has the specified role are returned. Role can be "staff"
or "instructor".
Case-insensitive.
**Returns**
* 200 on success, with a list of course ids and pagination details
* 400 if an invalid parameter was sent or the username was not provided
for an authenticated request.
* 403 if a user who does not have permission to masquerade as
          another user specifies a username other than their own.
* 404 if the specified user does not exist, or the requesting user does
not have permission to view their courses.
Example response:
{
"results":
[
"course-v1:edX+DemoX+Demo_Course"
],
"pagination": {
"previous": null,
"num_pages": 1,
"next": null,
"count": 1
}
}
"""
class CourseIdListPageNumberPagination(LazyPageNumberPagination):
max_page_size = 1000
pagination_class = CourseIdListPageNumberPagination
serializer_class = CourseKeySerializer
throttle_classes = (CourseIdListUserThrottle,)
def get_queryset(self):
"""
Returns CourseKeys for courses which the user has the provided role.
"""
form = CourseIdListGetForm(self.request.query_params, initial={'requesting_user': self.request.user})
if not form.is_valid():
raise ValidationError(form.errors)
return list_course_keys(
self.request,
form.cleaned_data['username'],
role=form.cleaned_data['role'],
)
def redirect_courses(request):
return HttpResponseRedirect((reverse('about_course', kwargs={'course_id': BIOLOGY_COURSE_ID})))
|
agpl-3.0
| -6,341,263,580,877,471,000
| 36.413882
| 109
| 0.616669
| false
| 4.419678
| false
| false
| false
|
tomwright01/AO_Registration
|
example.py
|
1
|
1605
|
import AoRegistration.AoRecording as AoRecording
import timeit
import logging
import argparse
def main():
"""
"""
logging.info('Reading file:%s','data/sample.avi')
vid = AoRecording.AoRecording(filepath='data/sample.avi')
vid.load_video()
logging.info('Starting parallel processing')
tic=timeit.default_timer()
vid.filter_frames()
vid.fixed_align_frames()
vid.complete_align_parallel()
vid.create_average_frame()
vid.create_stdev_frame()
toc = timeit.default_timer()
print 'Parallel Process took {}:'.format(toc-tic)
vid.create_stdev_frame()
logging.info('writing output')
vid.write_video('output/output_parallel.avi')
vid.write_average_frame('output/lucky_average_parallel.png')
vid.write_frame('output/lucky_stdev.png','stdev')
logging.info('Starting serial processing')
tic=timeit.default_timer()
vid.filter_frames()
vid.fixed_align_frames()
vid.complete_align()
vid.create_average_frame()
toc = timeit.default_timer()
print 'Serial Process took {}:'.format(toc-tic)
logging.info('writing output')
vid.write_video('output/output_serial.avi')
vid.write_frame('output/lucky_average_serial.png','average')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Register frames from an AO video')
parser.add_argument('-v','--verbose',help='Increase the amount of output', action='store_true')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging.info('started')
main()
|
mit
| 5,573,137,991,126,652,000
| 29.301887
| 99
| 0.674766
| false
| 3.614865
| false
| false
| false
|
rmadapur/networking-brocade
|
networking_brocade/mlx/ml2/fi_ni/driver_factory.py
|
1
|
3345
|
# Copyright 2015 Brocade Communications Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Returns the driver based on the firmware version of the device
"""
from neutron.i18n import _LE
from neutron.i18n import _LI
from oslo_log import log as logging
from oslo_utils import importutils
LOG = logging.getLogger(__name__)
CONNECTION_FACTORY = ("networking_brocade.mlx.ml2.connector_factory."
"ConnectorFactory")
FI_DRIVER = "networking_brocade.mlx.ml2.fi_ni.fi_driver.FastIronDriver"
NI_DRIVER = "networking_brocade.mlx.ml2.fi_ni.ni_driver.NetIronDriver"
NETIRON = "NetIron"
FASTIRON = "ICX"
FI = "FI"
NI = "NI"
class BrocadeDriverFactory(object):
"""
Factory class that decides which driver to use based on the
device type. It uses FastIron driver for ICX devices and
NetIron driver for MLX devices
"""
def get_driver(self, device):
"""
Returns the driver based on the firmware.
:param:device: A dictionary which has the device details
:returns: Appropriate driver for the device based on the firmware
version, None otherwise
:raises: Exception
"""
driver = None
address = device.get('address')
os_type = device.get('ostype')
if os_type == FI:
driver = importutils.import_object(FI_DRIVER, device)
elif os_type == NI:
driver = importutils.import_object(NI_DRIVER, device)
else:
connector = importutils.import_object(CONNECTION_FACTORY
).get_connector(device)
connector.connect()
version = connector.get_version()
connector.close_session()
if NETIRON in version:
LOG.info(
_LI("OS Type of the device %(host)s is as NetIron"),
{'host': address})
driver = importutils.import_object(NI_DRIVER, device)
device.update({'ostype': NI})
elif FASTIRON in version:
LOG.info(
_LI("OS Type of the device %(host)s is as FastIron"),
{'host': device.get('address')})
driver = importutils.import_object(FI_DRIVER, device)
device.update({'ostype': FI})
else:
LOG.exception(_LE("Brocade Driver Factory: failed to "
"identify device type for device="
"%(device)s"), {'device': address})
raise Exception("Unsupported firmware %(firmware)s for device "
"%(host)s", {'firmware': version,
'host': address})
return driver
|
apache-2.0
| -6,562,487,558,882,334,000
| 37.448276
| 79
| 0.592526
| false
| 4.305019
| false
| false
| false
|
carragom/modoboa
|
modoboa/admin/models/domain.py
|
1
|
9835
|
"""Models related to domains management."""
import datetime
from django.db import models
from django.db.models.manager import Manager
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _, ugettext_lazy
from django.contrib.contenttypes.fields import GenericRelation
from reversion import revisions as reversion
from modoboa.core import signals as core_signals
from modoboa.core.models import User, ObjectAccess
from modoboa.lib.exceptions import BadRequest, Conflict
from modoboa.parameters import tools as param_tools
from .. import signals
from .base import AdminObject
from .. import constants
class DomainManager(Manager):
def get_for_admin(self, admin):
"""Return the domains belonging to this admin
The result is a ``QuerySet`` object, so this function can be used
to fill ``ModelChoiceField`` objects.
"""
if admin.is_superuser:
return self.get_queryset()
return self.get_queryset().filter(owners__user=admin)
@python_2_unicode_compatible
class Domain(AdminObject):
"""Mail domain."""
name = models.CharField(ugettext_lazy('name'), max_length=100, unique=True,
help_text=ugettext_lazy("The domain name"))
quota = models.IntegerField()
enabled = models.BooleanField(
ugettext_lazy('enabled'),
help_text=ugettext_lazy("Check to activate this domain"),
default=True
)
owners = GenericRelation(ObjectAccess)
type = models.CharField(default="domain", max_length=20)
enable_dns_checks = models.BooleanField(
ugettext_lazy("Enable DNS checks"), default=True,
help_text=ugettext_lazy("Check to enable DNS checks for this domain")
)
objects = DomainManager()
class Meta:
permissions = (
("view_domain", "View domain"),
("view_domains", "View domains"),
)
ordering = ["name"]
app_label = "admin"
def __init__(self, *args, **kwargs):
"""Save name for further use."""
super(Domain, self).__init__(*args, **kwargs)
self.old_mail_homes = None
self.oldname = self.name
@property
def domainalias_count(self):
return self.domainalias_set.count()
@property
def mailbox_count(self):
return self.mailbox_set.count()
@property
def mbalias_count(self):
return self.alias_set.filter(internal=False).count()
@property
def identities_count(self):
"""Total number of identities in this domain."""
return (
self.mailbox_set.count() +
self.alias_set.filter(internal=False).count())
@property
def tags(self):
if self.type == "domain":
return [{"name": "domain", "label": _("Domain"), "type": "dom"}]
results = signals.get_domain_tags.send(
sender=self.__class__, domain=self)
return reduce(lambda a, b: a + b, [result[1] for result in results])
@property
def admins(self):
"""Return the domain administrators of this domain.
:return: a list of User objects
"""
return User.objects.filter(
is_superuser=False,
objectaccess__content_type__model="domain",
objectaccess__object_id=self.pk)
@property
def aliases(self):
return self.domainalias_set
@property
def uses_a_reserved_tld(self):
"""Does this domain use a reserved TLD."""
tld = self.name.split(".", 1)[-1]
return tld in constants.RESERVED_TLD
@property
def just_created(self):
"""Return true if the domain was created in the latest 24h."""
now = timezone.now()
delta = datetime.timedelta(days=1)
return self.creation + delta > now
def awaiting_checks(self):
"""Return true if the domain has no valid MX record and was created
in the latest 24h."""
if (not self.mxrecord_set.has_valids()) and self.just_created:
return True
return False
@cached_property
def dnsbl_status_color(self):
"""Shortcut to DNSBL results."""
if not self.dnsblresult_set.exists():
return "warning"
elif self.dnsblresult_set.blacklisted().exists():
return "danger"
else:
return "success"
def add_admin(self, account):
"""Add a new administrator to this domain.
:param User account: the administrator
"""
from modoboa.lib.permissions import grant_access_to_object
core_signals.can_create_object.send(
sender=self.__class__, context=self, object_type="domain_admins")
grant_access_to_object(account, self)
for mb in self.mailbox_set.all():
if mb.user.has_perm("admin.add_domain"):
continue
grant_access_to_object(account, mb)
grant_access_to_object(account, mb.user)
for al in self.alias_set.all():
grant_access_to_object(account, al)
def remove_admin(self, account):
"""Remove an administrator of this domain.
:param User account: administrator to remove
"""
from modoboa.lib.permissions import ungrant_access_to_object
ungrant_access_to_object(self, account)
for mb in self.mailbox_set.all():
if mb.user.has_perm("admin.add_domain"):
continue
ungrant_access_to_object(mb, account)
ungrant_access_to_object(mb.user, account)
for al in self.alias_set.all():
ungrant_access_to_object(al, account)
def save(self, *args, **kwargs):
"""Store current data if domain is renamed."""
if self.oldname != self.name:
self.old_mail_homes = (
dict((mb.id, mb.mail_home) for mb in self.mailbox_set.all())
)
super(Domain, self).save(*args, **kwargs)
def delete(self, fromuser, keepdir=False):
"""Custom delete method."""
from modoboa.lib.permissions import ungrant_access_to_objects
from .mailbox import Quota
if self.domainalias_set.count():
ungrant_access_to_objects(self.domainalias_set.all())
if self.alias_set.count():
ungrant_access_to_objects(self.alias_set.all())
if param_tools.get_global_parameter("auto_account_removal"):
for account in User.objects.filter(mailbox__domain=self):
account.delete(fromuser, keepdir)
elif self.mailbox_set.count():
Quota.objects.filter(username__contains="@%s" % self.name).delete()
ungrant_access_to_objects(self.mailbox_set.all())
super(Domain, self).delete()
def __str__(self):
return smart_text(self.name)
def from_csv(self, user, row):
"""Create a new domain from a CSV entry.
The expected fields order is the following::
"domain", name, quota, enabled
:param ``core.User`` user: user creating the domain
:param str row: a list containing domain's definition
"""
if len(row) < 4:
raise BadRequest(_("Invalid line"))
self.name = row[1].strip()
if Domain.objects.filter(name=self.name).exists():
raise Conflict
try:
self.quota = int(row[2].strip())
except ValueError:
raise BadRequest(
_("Invalid quota value for domain '%s'") % self.name)
self.enabled = (row[3].strip() in ["True", "1", "yes", "y"])
self.save(creator=user)
def to_csv(self, csvwriter):
csvwriter.writerow(["domain", self.name, self.quota, self.enabled])
for dalias in self.domainalias_set.all():
dalias.to_csv(csvwriter)
def post_create(self, creator):
"""Post creation actions.
        :param ``User`` creator: user who created this domain
"""
super(Domain, self).post_create(creator)
for domalias in self.domainalias_set.all():
domalias.post_create(creator)
reversion.register(Domain)
class MXQuerySet(models.QuerySet):
"""Custom manager for MXRecord."""
def has_valids(self):
"""Return managed results."""
if param_tools.get_global_parameter("valid_mxs").strip():
return self.filter(managed=True).exists()
return self.exists()
class MXRecord(models.Model):
"""A model used to store MX records for Domain."""
domain = models.ForeignKey(Domain)
name = models.CharField(max_length=254)
address = models.GenericIPAddressField()
managed = models.BooleanField(default=False)
updated = models.DateTimeField()
objects = models.Manager.from_queryset(MXQuerySet)()
def is_managed(self):
if not param_tools.get_global_parameter("enable_mx_checks"):
return False
return bool(param_tools.get_global_parameter("valid_mxs").strip())
def __unicode__(self):
return u"{0.name} ({0.address}) for {0.domain} ".format(self)
class DNSBLQuerySet(models.QuerySet):
"""Custom manager for DNSBLResultManager."""
def blacklisted(self):
"""Return blacklisted results."""
return self.exclude(status="")
class DNSBLResult(models.Model):
"""Store a DNSBL query result."""
domain = models.ForeignKey(Domain)
provider = models.CharField(max_length=254, db_index=True)
mx = models.ForeignKey(MXRecord)
status = models.CharField(max_length=45, blank=True, db_index=True)
objects = models.Manager.from_queryset(DNSBLQuerySet)()
class Meta:
app_label = "admin"
unique_together = [("domain", "provider", "mx")]
|
isc
| -5,286,976,501,532,225,000
| 32.003356
| 79
| 0.621556
| false
| 4.022495
| false
| false
| false
|
nubakery/smith3
|
python/relcaspt2/queue_split.py
|
1
|
2189
|
#!/opt/local/bin/python
import string
import os
import re
def header(n) :
return "//\n\
// BAGEL - Brilliantly Advanced General Electronic Structure Library\n\
// Filename: RelCASPT2" + n + ".cc\n\
// Copyright (C) 2014 Toru Shiozaki\n\
//\n\
// Author: Toru Shiozaki <shiozaki@northwestern.edu>\n\
// Maintainer: Shiozaki group\n\
//\n\
// This file is part of the BAGEL package.\n\
//\n\
// This program is free software: you can redistribute it and/or modify\n\
// it under the terms of the GNU General Public License as published by\n\
// the Free Software Foundation, either version 3 of the License, or\n\
// (at your option) any later version.\n\
//\n\
// This program is distributed in the hope that it will be useful,\n\
// but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
// GNU General Public License for more details.\n\
//\n\
// You should have received a copy of the GNU General Public License\n\
// along with this program. If not, see <http://www.gnu.org/licenses/>.\n\
//\n\
\n\
#include <bagel_config.h>\n\
#ifdef COMPILE_SMITH\n\
\n\
\n\
#include <src/smith/relcaspt2/RelCASPT2.h>\n"
def insert():
return "#include <src/smith/relcaspt2/RelCASPT2_tasks.h>\n"
def header2():
return "\n\
using namespace std;\n\
using namespace bagel;\n\
using namespace bagel::SMITH;\n\
\n\
"
footer = "#endif\n"
f = open('RelCASPT2.cc', 'r')
lines = f.read().split("\n")[34:]
tasks = []
tmp = ""
for line in lines:
if (len(line) >= 17 and (line[0:17] == "shared_ptr<Queue>" or line[0:17] == "RelCASPT2::RelCAS")):
if (tmp != ""):
tasks.append(tmp)
tmp = ""
tmp += line + "\n"
if (line == "}"):
tmp += "\n"
tasks.append(tmp)
p = re.compile('make_[a-z0-9]+q')
for task in tasks[0:-1]:
tag = p.search(task).group()[5:]
fout = open("RelCASPT2_" + tag + ".cc", "w")
out = header("_" + tag + "q") + insert() + header2() + task + footer
fout.write(out)
fout.close()
os.remove("RelCASPT2.cc")
fout = open("RelCASPT2.cc", "w")
out = header("") + header2() + tasks[len(tasks)-1] + footer
fout.write(out)
fout.close()
|
gpl-2.0
| 3,854,441,453,575,640,600
| 26.3625
| 102
| 0.636364
| false
| 2.820876
| false
| false
| false
|
cheery/essence
|
interpret.py
|
1
|
2438
|
# This file is part of Essential Editor Research Project (EERP)
#
# EERP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EERP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EERP. If not, see <http://www.gnu.org/licenses/>.
from essence import load
import sys
class closure(object):
def __init__(self, arguments, body, env):
self.arguments = arguments
self.body = body
self.env = env
def apply(self, arguments):
env = dict(zip(self.arguments, arguments))
env['__parent__'] = self.env
res = None
for expr in self.body.array:
res = interpret(expr, env)
return res
def interpret(expr, env):
name = expr.get('name')
if name == 'int':
return int(expr.string) # on early versions it's a string.
if name == 'mul':
left, right = expr.array
return interpret(left, env) * interpret(right, env)
if name == 'add':
left, right = expr.array
return interpret(left, env) + interpret(right, env)
if name == 'set':
left, right = expr.array
env[left] = interpret(right, env)
if name == 'variable':
variable = expr.string
if not variable in env:
raise Exception("%r not in %r" % (variable, env))
return env[variable]
if name == 'define':
name, arglist, body = expr.array
arguments = []
for argument in arglist.array:
assert argument.get('name') == 'variable'
arguments.append(argument.string)
env[name] = closure(arguments, body, env)
if name == 'call':
caller, arguments = expr.array
caller = interpret(caller, env)
arguments = [interpret(arg, env) for arg in arguments]
return caller.apply(arguments)
raise Exception("unknown clause %r", expr)
program = load(sys.argv[1])
assert program.get('name') == 's-expr'
env = {}
res = None
for item in program.array:
res = interpret(item, env)
print res
|
gpl-3.0
| 6,000,114,890,642,886,000
| 33.338028
| 70
| 0.634126
| false
| 3.932258
| false
| false
| false
|
dsweet04/rekall
|
rekall-core/rekall/plugins/response/renderers.py
|
1
|
3128
|
# Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__author__ = "Michael Cohen <scudette@google.com>"
from rekall.ui import text
from rekall.plugins.renderers import data_export
from rekall_lib import utils
class FileSpec_Text(text.TextObjectRenderer):
renders_type = "FileSpec"
def render_row(self, target, width=None, **_):
if target.filesystem == "API":
return text.Cell(unicode(target.name), width=width)
else:
return text.Cell(u"%s (%s)" % (target.name, target.filesystem),
width=width)
class FileInformation_TextObjectRenderer(text.TextObjectRenderer):
renders_type = "FileInformation"
def render_row(self, target, **options):
return FileSpec_Text(
renderer=self.renderer, session=self.session).render_row(
target.filename, **options)
class UserTextObjectRenderer(text.TextObjectRenderer):
renders_type = "User"
def render_row(self, item, **_):
if item.username:
return text.Cell(u"%s (%s)" % (item.username, item.uid))
return text.Cell(unicode(item.uid))
class GroupTextObjectRenderer(text.TextObjectRenderer):
renders_type = "Group"
def render_row(self, item, **_):
if item.group_name:
return text.Cell(u"%s (%s)" % (item.group_name, item.gid))
return text.Cell(unicode(item.gid))
class DataExportFileSpecObjectRenderer(
data_export.DataExportBaseObjectRenderer):
renders_type = "FileSpec"
def Summary(self, item, **_):
return utils.SmartStr(item)
def GetState(self, item, **options):
return dict(filesystem=item.filesystem, name=item.name)
class PermissionsFileSpecObjectRenderer(
data_export.DataExportBaseObjectRenderer):
renders_type = "Permissions"
def Summary(self, item, **_):
return utils.SmartStr(item)
def GetState(self, item, **options):
return dict(perm=str(item), int_perm=int(item))
class LiveProcessTextRenderer(text.TextObjectRenderer):
renders_type = "LiveProcess"
def render_row(self, target, width=None, **_):
return text.Cell("%s (%s)" % (target.name, target.pid), width=width)
class LiveProcessDataExportRenderer(
data_export.DataExportBaseObjectRenderer):
renders_type = "LiveProcess"
def GetState(self, item, **_):
return item.as_dict()
|
gpl-2.0
| -267,481,225,361,954,530
| 30.59596
| 76
| 0.684783
| false
| 3.805353
| false
| false
| false
|
phoebe-project/phoebe2
|
tests/nosetests/test_blackbody/test_blackbody.py
|
1
|
1553
|
"""
"""
import phoebe
from phoebe import u
import numpy as np
import matplotlib.pyplot as plt
def test_binary(plot=False):
b = phoebe.Bundle.default_binary()
# Two spherical suns
b.set_value_all('teff', value=5772.)
b.set_value('sma', component='binary', value=100.)
b.set_value('period', component='binary', value=81.955)
b.add_dataset('lc', times=np.linspace(0,100,21))
b.add_compute('phoebe', compute='phoebe2')
b.add_compute('legacy', compute='phoebe1')
# set matching atmospheres
b.set_value_all('atm', 'extern_planckint')
# turn off limb-darkening:
b.set_value_all('ld_mode_bol', 'manual')
b.set_value_all('ld_func_bol', 'linear')
b.set_value_all('ld_coeffs_bol', [0.0])
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'linear')
b.set_value_all('ld_coeffs', [0.0])
    # turn off albedos (legacy requirement)
b.set_value_all('irrad_frac_refl_bol', 0.0)
if plot: print("running phoebe2 model...")
b.run_compute(compute='phoebe2', irrad_method='none', model='phoebe2model')
if plot: print("running phoebe1 model...")
b.run_compute(compute='phoebe1', refl_num=0, model='phoebe1model')
phoebe2_val = b.get_value('fluxes@phoebe2model')
phoebe1_val = b.get_value('fluxes@phoebe1model')
if plot:
b.plot(dataset='lc01', show=True)
assert(np.allclose(phoebe2_val, phoebe1_val, rtol=1e-3, atol=0.))
return b
if __name__ == '__main__':
logger = phoebe.logger(clevel='INFO')
b = test_binary(plot=True)
|
gpl-3.0
| 923,983,890,196,022,900
| 26.732143
| 79
| 0.641339
| false
| 2.696181
| false
| false
| false
|
wojtask/CormenPy
|
test/test_chapter15/test_exercise15_5_1.py
|
1
|
2830
|
import io
import re
from contextlib import redirect_stdout
from unittest import TestCase
from hamcrest import *
from chapter15.exercise15_5_1 import construct_optimal_bst
from chapter15.textbook15_5 import optimal_bst
from test_chapter15.test_textbook15_5 import get_probabilities_for_optimal_bst
def assert_optimal_bst_output(actual_output, root):
n = root.length
root_id = int(re.search('k(\d+) is the root', actual_output[0]).group(1))
assert_that(root_id, is_(equal_to(root[1, n])))
line_no = assert_left_child_output(actual_output, root, 1, root_id - 1, 1)
line_no = assert_right_child_output(actual_output, root, root_id + 1, n, line_no + 1)
assert_that(actual_output, has_length(line_no + 1))
def assert_left_child_output(actual_output, root, i, j, line_no):
parent = j + 1
comp = re.compile('([kd])(\d+) is the left child of k(\d+)')
node_type = comp.search(actual_output[line_no]).group(1)
node_id = int(comp.search(actual_output[line_no]).group(2))
actual_parent = int(comp.search(actual_output[line_no]).group(3))
assert_that(actual_parent, is_(equal_to(parent)))
if i <= j:
assert_that(node_type, is_(equal_to('k')))
assert_that(node_id, is_(equal_to(root[i, j])))
line_no = assert_left_child_output(actual_output, root, i, node_id - 1, line_no + 1)
line_no = assert_right_child_output(actual_output, root, node_id + 1, j, line_no + 1)
else:
assert_that(node_type, is_(equal_to('d')))
assert_that(node_id, is_(equal_to(j)))
return line_no
def assert_right_child_output(actual_output, root, i, j, line_no):
parent = i - 1
comp = re.compile('([kd])(\d+) is the right child of k(\d+)')
node_type = comp.search(actual_output[line_no]).group(1)
node_id = int(comp.search(actual_output[line_no]).group(2))
actual_parent = int(comp.search(actual_output[line_no]).group(3))
assert_that(actual_parent, is_(equal_to(parent)))
if i <= j:
assert_that(node_type, is_(equal_to('k')))
assert_that(node_id, is_(equal_to(root[i, j])))
line_no = assert_left_child_output(actual_output, root, i, node_id - 1, line_no + 1)
line_no = assert_right_child_output(actual_output, root, node_id + 1, j, line_no + 1)
else:
assert_that(node_type, is_(equal_to('d')))
assert_that(node_id, is_(equal_to(j)))
return line_no
class TestExercise15_5_1(TestCase):
def test_construct_optimal_bst(self):
p, q = get_probabilities_for_optimal_bst()
_, root = optimal_bst(p, q, p.length)
captured_output = io.StringIO()
with redirect_stdout(captured_output):
construct_optimal_bst(root)
actual_output = captured_output.getvalue().splitlines()
assert_optimal_bst_output(actual_output, root)
|
gpl-3.0
| 5,799,465,066,080,409,000
| 40.014493
| 93
| 0.645936
| false
| 2.991543
| true
| false
| false
|
rahlk/CSC579__Computer_Performance_Modeling
|
simulation/proj1/tasks/task5.py
|
1
|
2063
|
from __future__ import division
from __future__ import print_function
import os
import sys
import functools
# Update path
root = os.path.join(os.getcwd().split('proj1')[0], 'proj1')
if root not in sys.path:
sys.path.append(root)
import numpy as np
import pandas as pd
import multiprocessing
from pdb import set_trace
from Simulator import simulate
from Utils.PlotsUtils import line, line2
from Utils.RandomUtil import Random
from Utils.MisclUtils import TimeUtil
rand = Random()
timer = TimeUtil()
# Set seed
rand.set_seed(seed_val=12458)
def customer_loss_rate(customers):
served = np.sum([customer.serviced for customer in customers])
total = len(customers)
return served / total
def plot_runtime(x=None, y=None):
line(x, y, x_label=r"$\rho$", y_label=r"Run Times", the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")
def plot_runtime_vs_avg(x, y, y_1):
line2(x, y, x, y_1, label_1="Actual Runtimes", label_2="Expected value of $\rho$", x_label=r"$\rho$", y_label=r"Run Times", the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")
def task_5():
rho_list = np.arange(0.05, 1, 0.1)
C = 1e5
elapsed = []
for rho in rho_list:
start_time = timer.current_time()
serviced = simulate(l = rho, server_lim = 40, max_serviced=C, L=1, verbose=False)
end_time = timer.current_time()
elapsed.append(end_time-start_time)
data = pd.DataFrame([[a,b] for a, b in zip(rho_list, elapsed)], columns=["Rho", "Seconds"])
data.to_csv(os.path.abspath(os.path.join(root,"tasks/task5.csv")))
def task5_plot():
data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
plot_runtime(data["Rho"], data["Seconds"])
set_trace()
def compare_plot():
rho_list = np.arange(0.05, 1, 0.1)
average_rho = [np.mean([rand.exponential(lam=p) for _ in xrange(10000)]) for p in rho_list]
data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
plot_runtime(data["Rho"], average_rho)
if __name__ == "__main__":
task_5()
task5_plot()
compare_plot()
|
mit
| 711,134,336,705,916,900
| 28.898551
| 193
| 0.654387
| false
| 2.893408
| false
| false
| false
|
JackDesBwa/IrisMonitor
|
decoders/test.py
|
1
|
1272
|
#!/usr/bin/env python
import sys, os
if os.path.dirname(os.path.abspath(__file__)) in sys.path:
sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
if os.path.dirname(os.path.dirname(os.path.abspath(__file__))) not in sys.path:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import decoders, events
def onAllMessages(event, data):
sys.stdout.write('%f : ' % data[0])
sys.stdout.write('%s - ' % event)
print(data[1])
events.register_all(decoders.IrisDecoder, onAllMessages)
sys.stdout.write('Available decoders are :\n')
i = 0
declist = decoders.get_list()
for d in declist:
sys.stdout.write(' %3d %s\n' % (i, d))
i += 1
try:
if len(declist) == 0:
sys.stderr.write('No channel available.\n')
if len(declist) == 1:
a = 0
else:
sys.stdout.write('Which one would be tested ? ')
sys.stdout.flush()
a = int(sys.stdin.readline())
sys.stdout.write('Test of `%s`\n' % declist[a])
theclass = decoders.get_class(declist[a])
decoder = theclass()
except ValueError:
sys.exit('Invalid number.\n')
except IndexError:
sys.exit('Unknowed entry.\n')
except KeyboardInterrupt:
sys.exit('Interrupted test.\n')
sys.stdout.write('The decoder has to be tested in conjunction with a channel.\n')
from channels import test
|
mit
| 8,655,242,273,459,935,000
| 27.909091
| 81
| 0.691824
| false
| 2.777293
| false
| false
| false
|
gwct/grampa
|
lib/mul_recon.py
|
1
|
12960
|
#!/usr/bin/python
#############################################################################
# The main algorithmic functions for MUL-reconciliation mapping.
# Gregg Thomas
# Fall 2015, Combo algorithm implemented Spring 2016
#############################################################################
import os, itertools, recontree as RT, mul_tree as MT, reconcore as RC, gene_tree as GT, global_vars as globs
import pickle
#############################################################################
def reconLCA(lca_ginfo, sinfo, lca_maps, retmap=False):
# The LCA reconciliation mapping algorithm.
internal_nodes = RT.sortNodes(lca_ginfo);
# Sort the internal nodes for a post order traversal.
score = 0;
if retmap:
dups, losses = {}, {};
for g in lca_ginfo:
dups[g], losses[g] = 0, 0;
for g in internal_nodes:
g = "<" + str(g) + ">";
d1, d2 = RT.getDesc(g, lca_ginfo);
is_dup = 0;
g_clade = RT.getClade(g, lca_ginfo);
clade_maps = [];
for g_tip in g_clade:
clade_maps.append(lca_maps[g_tip][0]);
# Get the species in the clade of the current node. Then get all
# the possible maps from those species.
lca_maps[g].append(RT.LCA(clade_maps,sinfo)[0]);
if lca_maps[g][0] == lca_maps[d1][0] or lca_maps[g][0] == lca_maps[d2][0]:
if retmap:
dups[g] += 1;
score += 1;
is_dup = 1;
        # Now, if the map of g is identical to one of its descendants, it is a duplication node.
cur_depth = len(RT.nodeDepth(lca_maps[g][0],sinfo))
if lca_ginfo[g][2] == 'root':
if retmap:
losses[g] += cur_depth;
score += cur_depth;
# The number of losses at the root of the gene tree is equal to the depth of its map.
d1_depth = len(RT.nodeDepth(lca_maps[d1][0],sinfo));
d1_loss = (d1_depth - cur_depth - 1) + is_dup;
score += d1_loss
if retmap:
losses[d1] += d1_loss;
d2_depth = len(RT.nodeDepth(lca_maps[d2][0],sinfo))
d2_loss = (d2_depth - cur_depth - 1) + is_dup;
score += d2_loss;
if retmap:
losses[d2] += d2_loss;
# Counting losses for each of the descendents of the current node.
if retmap:
return lca_maps, dups, losses;
return score;
# Return the total number of duplication nodes.
#############################################################################
def getSis(gs_node, check_node, check_clade, gs_dict):
# Gets the hybrid and copy sister species.
d1, d2 = RT.getDesc(gs_node, gs_dict);
if d1 == check_node:
sis_node = d2;
elif d2 == check_node:
sis_node = d1;
sis_clade = RT.getClade(sis_node, gs_dict);
if any(c in check_clade for c in sis_clade):
return [];
else:
return sis_clade;
#############################################################################
def collapseGroups(mul_input, gene_trees_filtered_cg, spec_type_cg, v, pickle_dir, nmt):
# The collapseGroups function goes through all gene tree-MUL-tree combos to collapse the groups.
mul_num, mul_tree = mul_input;
if v == 1:
print("# " + RC.getDateTime() + " --> Collapsing groups for MUL-tree # " + str(mul_num) + " / " + str(nmt));
if mul_num == 0:
return mul_num, [];
gt_groups = {};
mt, minfo, hybrid_clade, hybrid_node, copy_node = mul_tree[0], mul_tree[1], mul_tree[2], mul_tree[3], mul_tree[4];
for gene_num in gene_trees_filtered_cg:
gene_tree = gene_trees_filtered_cg[gene_num];
if len(gene_tree) == 1:
continue;
# If the gene tree was previously filtered, the list will only contain the filter message and it should be skipped here.
gt,ginfo = gene_tree;
internal_nodes = RT.sortNodes(ginfo);
# Sort the internal nodes for a post order traversal.
singles, groups = {}, {};
for g in ginfo:
if ginfo[g][2] == 'tip':
if g[g.rfind("_")+1:] in hybrid_clade:
cur_anc = ginfo[g][1];
anc_clade = RT.getClade(cur_anc, ginfo);
anc_clade.remove(g);
singles[g] = anc_clade;
# First, all hybrid species nodes in the gene tree are added to the singles list.
## GETS SINGLETONS
for g in internal_nodes:
g = "<" + str(g) + ">";
# Next, for any non-tip node, we find out if the species that define it can be grouped
d1, d2 = RT.getDesc(g, ginfo);
d1_clade = RT.getClade(d1, ginfo);
d1_spec_clade = [spec[spec.rfind("_")+1:] for spec in d1_clade];
d2_clade = RT.getClade(d2,ginfo);
d2_spec_clade = [spec[spec.rfind("_")+1:] for spec in d2_clade];
# The clades for the descendants of both nodes are retrieved, and their corresponding
# species are stored.
if all(s in hybrid_clade for s in d1_spec_clade) and all(s in hybrid_clade for s in d2_spec_clade):
# If the descendants from both nodes are all hybrid clade species, then we may be able to group them.
if not any(s in d2_spec_clade for s in d1_spec_clade):
# However, only if there is not more than one copy of a species among the clades can they be grouped.
cur_clade = RT.getClade(g, ginfo);
cur_anc = ginfo[g][1];
anc_clade = RT.getClade(cur_anc, ginfo);
anc_clade = [spec for spec in anc_clade if spec not in cur_clade];
cur_nodes = RT.getCladeNode(g, ginfo);
for node in cur_nodes:
if node in groups:
del groups[node];
groups[g] = [cur_clade, anc_clade];
## CHECKS GROUPINGS
for group in groups:
for g in groups[group][0]:
if g in singles:
del singles[g];
# Removes any singles that are in a group.
final_groups = [];
for node in groups:
final_groups.append(groups[node]);
for single in singles:
final_groups.append([[single], singles[single]]);
# Restructures the final groups and adds singles.
sisters = {};
if spec_type_cg == 's':
mul_hybrid_node = [n for n in minfo if set(RT.getClade(n, minfo)) == set(hybrid_clade)][0];
copy_clade = [c + "*" for c in hybrid_clade];
mul_copy_node = [n for n in minfo if set(RT.getClade(n, minfo)) == set(copy_clade)][0];
# The copy clade is defined.
elif spec_type_cg == 'm':
copy_clade = RT.getClade(copy_node, minfo);
mul_hybrid_node = hybrid_node;
mul_copy_node = copy_node;
hybrid_anc = minfo[mul_hybrid_node][1];
copy_anc = minfo[mul_copy_node][1];
sisters[''] = getSis(hybrid_anc, mul_hybrid_node, copy_clade, minfo);
sisters['*'] = getSis(copy_anc, mul_copy_node, hybrid_clade, minfo);
# These lines get any sister species from the hybrid and copy clades in the MUL-tree and that
# clade's corresponding map. If there are no sisters, it stores an empty list.
groups, fixed_groups = [], [];
for group in final_groups:
group_sis = [spec[spec.rfind("_")+1:] for spec in group[1]];
if group_sis == []:
groups.append(group[0]);
continue;
if all(spec in sisters[''] for spec in group_sis):
fixed_groups.append([group[0],'']);
elif all(spec in sisters['*'] for spec in group_sis):
fixed_groups.append([group[0],'*']);
else:
groups.append(group[0]);
# This checks the sister species of all the groups for the gene tree. If all the sister species
# of a group are also in the sister species of the hybrid or copy clade in the MUL-tree, then we
# can fix the mapping of that node.
## FINDS FIXED SISTER GROUPS
gt_groups[gene_num] = [groups, fixed_groups];
# Adding the groups and fixed groups to the current gt_groups.
groupoutfile = os.path.join(pickle_dir, str(mul_num) + "_groups.pickle");
pickle.dump(gt_groups, open(groupoutfile, "wb"));
del groups, fixed_groups, final_groups, gene_trees_filtered_cg, gt_groups;
#############################################################################
def mulRecon(mul_input, gene_trees, v, pickle_dir, nmt, retmap=False):
# The basis of the MUL-reconciliation algorithm is that there are now nodes that
# have more than one possible map. We try all combinations of mappings for these
# nodes and find which combination(s) results in the most parsimonious mutation score
# (# duplication + # losses).
#
# A few prelminary steps are taken to ensure the quickest mapping groups:
# 1. Identify whether the hybrid or copy clade in the MUL-tree have sister groups. If so, we can use
# them to fix similar nodes in the gene tree.
# 2. Find nodes that contain only one or zero copies of the hybrid node species and species from one
# of the sister groups. Fix the mappings of these nodes.
# 3. Any other nodes that contain only one or zero copies of the hybrid node species can be grouped
# and should be mapped consistently, though we will still have to try both maps.
# 4. Remaining single hybrid nodes must be tried with both maps.
#
# Once these steps are done (in the collapseGroups function), a list of node groups is obtained, for
# which we generate every combination of map and try to reconcile to the MUL-tree. A score is obtained
# for each combination and the minimum score is kept as the correct map.
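    # Illustrative sketch of the combination step described above (hypothetical
    # three-group case): itertools.product(['', '*'], repeat=3) enumerates
    #   ('', '', ''), ('', '', '*'), ('', '*', ''), ..., ('*', '*', '*')
    # i.e. 2**num_groups candidate assignments of each group to the original ('')
    # or starred ('*') copy of the hybrid clade in the MUL-tree.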
mul_num, mul_tree = mul_input
#main_output, det_output, min_num, min_score, min_maps, multiple_maps = {}, [], '', 9999999, {}, 0;
# mulpicklefile = os.path.join(pickle_dir, str(mul_num) + "_tree.pickle");
# mul_tree = pickle.load(open(mulpicklefile, "rb"));
if v == 1:
print("# " + RC.getDateTime() + " --> Reconciling to MUL-tree # " + str(mul_num) + " / " + str(nmt));
min_maps = {};
total_score = 0;
if mul_num != 0:
groupfilename = os.path.join(pickle_dir, str(mul_num) + "_groups.pickle");
cur_groups = pickle.load(open(groupfilename, "rb"));
for gene_num, gene_tree in gene_trees.items():
gt, ginfo = gene_tree;
gene_score = 99999;
min_maps[gene_num] = [];
if mul_num == 0:
sinfo = mul_tree[1];
init_maps = {};
for g in ginfo:
if ginfo[g][2] == 'tip':
speclabel = g[g.rfind("_")+1:];
init_maps[g] = [speclabel];
else:
init_maps[g] = [];
# Initialize the maps.
if retmap:
maps, node_dups, node_loss = reconLCA(ginfo, sinfo, init_maps, retmap);
num_dups = sum(node_dups.values());
num_loss = sum(node_loss.values());
gene_score = num_dups + num_loss;
min_maps[gene_num].append([gene_score, num_dups, num_loss, maps, node_dups, node_loss]);
else:
gene_score = reconLCA(ginfo, sinfo, init_maps);
total_score += gene_score;
# Some counting.
else:
mt, minfo, hybrid_clade, hybrid_node, copy_node, = mul_tree[0], mul_tree[1], mul_tree[2], mul_tree[3], mul_tree[4];
# Aggregate variables for the current GENE tree.
gt_groups, gt_fixed = cur_groups[gene_num][0], cur_groups[gene_num][1];
num_groups = len(gt_groups);
# Retrieve gene tree info and collapsed groups for this gene tree-MUL-tree combo
for combo in itertools.product(['','*'], repeat=num_groups):
# We get all combinations of mappings for each node group. This is the time constraining step.
group_map = [];
for i in range(len(combo)):
for node in gt_groups[i]:
group_map.append(node + combo[i]);
# This builds the current map for each group.
for fixed in gt_fixed:
for node in fixed[0]:
group_map.append(node + fixed[1]);
# This adds the fixed maps onto the current combination of group mappings.
# Now we do LCA mapping for the current combination of maps for the hybrid clade species.
maps = {};
for g in ginfo:
if ginfo[g][2] == 'tip':
speclabel = g[g.rfind("_")+1:];
if g in group_map:
maps[g] = [speclabel];
# If the node is in a hybrid clade, use the map in the current combination.
elif g + "*" in group_map:
maps[g] = [speclabel + "*"];
else:
maps[g] = [speclabel];
# Otherwise, the map is just the species label.
else:
maps[g] = [];
# And if the node is not a tip, the map is empty.
if retmap:
maps, node_dups, node_loss = reconLCA(ginfo, minfo, maps, retmap);
num_dups = sum(node_dups.values());
num_loss = sum(node_loss.values());
cur_score = num_dups + num_loss;
if cur_score <= gene_score:
if cur_score < gene_score:
gene_score = cur_score;
min_maps[gene_num] = [];
min_maps[gene_num].append([gene_score, num_dups, num_loss, maps, node_dups, node_loss])
else:
cur_score = reconLCA(ginfo, minfo, maps);
if cur_score < gene_score:
gene_score = cur_score;
# Once the current maps have been initialized, we can simply call the normal LCA mapping algorithm
## End mapping of one gene tree.
total_score += gene_score;
## End mapping all gene trees.
if retmap:
return min_maps;
else:
return mul_num, total_score;
# #############################################################################
# A couple ways to get the map combos:
# combo_ind = list(itertools.product(['','*'], repeat=len(node_ind)));
# if v == -2:
# print "num combos", len(combo_ind);
# combos = list(itertools.product(['','*'], repeat=len(node_ind)));
# Old loading:
# if v == 0 and numiters > 100:
# numbars, donepercent = RC.loadingBar(itercount, numiters, donepercent, numbars);
# itercount = itercount + 1;
# # Only the loading bar displays when the program is running if -v is set to 0.
|
gpl-3.0
| 5,744,836,206,608,884,000
| 33.195251
| 122
| 0.626157
| false
| 2.960256
| false
| false
| false
|
diofant/diofant
|
diofant/domains/domain.py
|
1
|
8541
|
"""Implementation of :class:`Domain` class."""
import abc
import inspect
from ..core import Expr
from ..core.compatibility import HAS_GMPY
from ..polys.orderings import lex
from ..polys.polyerrors import CoercionFailed, UnificationFailed
from ..polys.polyutils import _unify_gens
from ..printing.defaults import DefaultPrinting
from .domainelement import DomainElement
class Domain(DefaultPrinting, abc.ABC):
"""Represents an abstract domain."""
is_Ring = False
is_Field = False
has_assoc_Ring = False
is_FiniteField = False
is_IntegerRing = False
is_RationalField = False
is_RealField = False
is_ComplexField = False
is_AlgebraicField = False
is_RealAlgebraicField = False
is_ComplexAlgebraicField = False
is_PolynomialRing = False
is_FractionField = False
is_ExpressionDomain = False
is_Exact = True
is_Numerical = False
def __hash__(self):
return hash((self.__class__.__name__, self.dtype))
def __call__(self, *args):
"""Construct an element of ``self`` domain from ``args``."""
return self.dtype(*args)
def __getstate__(self):
return {}
@abc.abstractmethod
def from_expr(self, expr):
"""Convert Diofant's expression ``expr`` to ``dtype``."""
raise NotImplementedError
@abc.abstractmethod
def to_expr(self, element):
"""Convert domain ``element`` to Diofant expression."""
raise NotImplementedError
def convert_from(self, element, base):
"""Convert ``element`` to ``self.dtype`` given the base domain."""
for superclass in inspect.getmro(base.__class__):
method = '_from_' + superclass.__name__
convert = getattr(self, method, None)
if convert:
result = convert(element, base)
if result is not None:
return result
raise CoercionFailed(f"can't convert {element} of type {type(element)} "
f'from {base} to {self}')
def convert(self, element, base=None):
"""Convert ``element`` to ``self.dtype``."""
if base is not None:
return self.convert_from(element, base)
if isinstance(element, self.dtype):
return element
from . import ComplexField, PythonRational, RealField
from .expressiondomain import ExpressionDomain
from .integerring import GMPYIntegerRing, PythonIntegerRing
from .rationalfield import GMPYRationalField, PythonRationalField
if isinstance(element, int):
return self.convert_from(element, PythonIntegerRing())
if isinstance(element, PythonRational):
return self.convert_from(element, PythonRationalField())
if HAS_GMPY:
integers = GMPYIntegerRing()
if isinstance(element, integers.dtype):
return self.convert_from(element, integers)
rationals = GMPYRationalField()
if isinstance(element, rationals.dtype):
return self.convert_from(element, rationals)
if isinstance(element, float):
parent = RealField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, complex):
parent = ComplexField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, DomainElement):
return self.convert_from(element, element.parent)
if isinstance(element, ExpressionDomain.Expression):
return self.convert_from(element, ExpressionDomain())
if isinstance(element, Expr):
try:
return self.from_expr(element)
except (TypeError, ValueError):
pass
raise CoercionFailed(f"can't convert {element} of type {type(element)} to {self}")
def __contains__(self, a):
"""Check if ``a`` belongs to this domain."""
try:
self.convert(a)
return True
except CoercionFailed:
return False
def _from_PolynomialRing(self, a, K0):
if a.is_ground:
return self.convert(a.LC, K0.domain)
def _from_FractionField(self, a, K0):
if a.numerator.is_ground and a.denominator == 1:
return self.convert(a.numerator.LC, K0.domain.ring)
def unify(self, K1, symbols=()):
"""
Construct a minimal domain that contains elements of ``self`` and ``K1``.
Known domains (from smallest to largest):
- ``GF(p)``
- ``ZZ``
- ``QQ``
- ``RR(prec, tol)``
- ``CC(prec, tol)``
- ``ALG(a, b, c)``
- ``K[x, y, z]``
- ``K(x, y, z)``
- ``EX``
"""
from .compositedomain import CompositeDomain
if symbols:
if any(isinstance(d, CompositeDomain) and (set(d.symbols) & set(symbols))
for d in [self, K1]):
raise UnificationFailed(f"Can't unify {self} with {K1}, "
f'given {symbols} generators')
return self.unify(K1)
if self == K1:
return self
if self.is_ExpressionDomain:
return self
if K1.is_ExpressionDomain:
return K1
if any(isinstance(d, CompositeDomain) for d in (self, K1)):
if isinstance(self, CompositeDomain):
self_ground = self.domain
self_symbols = self.symbols
order = self.order
else:
self_ground = self
self_symbols = ()
order = K1.order
if isinstance(K1, CompositeDomain):
K1_ground = K1.domain
K1_symbols = K1.symbols
else:
K1_ground = K1
K1_symbols = ()
domain = self_ground.unify(K1_ground)
symbols = _unify_gens(self_symbols, K1_symbols)
if ((self.is_FractionField and K1.is_PolynomialRing or
K1.is_FractionField and self.is_PolynomialRing) and
(not self_ground.is_Field or not K1_ground.is_Field) and domain.has_assoc_Ring):
domain = domain.ring
if isinstance(self, CompositeDomain) and (not isinstance(K1, CompositeDomain) or self.is_FractionField or K1.is_PolynomialRing):
cls = self.__class__
else:
cls = K1.__class__
return cls(domain, symbols, order)
def mkinexact(cls, K0, K1):
prec = max(K0.precision, K1.precision)
tol = max(K0.tolerance, K1.tolerance)
return cls(prec=prec, tol=tol)
if self.is_ComplexField and K1.is_ComplexField:
return mkinexact(self.__class__, self, K1)
if self.is_ComplexField and K1.is_RealField:
return mkinexact(self.__class__, self, K1)
if self.is_RealField and K1.is_ComplexField:
return mkinexact(K1.__class__, K1, self)
if self.is_RealField and K1.is_RealField:
return mkinexact(self.__class__, self, K1)
if self.is_ComplexField or self.is_RealField:
return self
if K1.is_ComplexField or K1.is_RealField:
return K1
if self.is_AlgebraicField and K1.is_AlgebraicField:
return self.__class__(self.domain.unify(K1.domain), *_unify_gens(self.gens, K1.gens))
elif self.is_AlgebraicField:
return self
elif K1.is_AlgebraicField:
return K1
if self.is_RationalField:
return self
if K1.is_RationalField:
return K1
if self.is_FiniteField and self.domain == K1:
return self
if K1.is_FiniteField and K1.domain == self:
return K1
raise NotImplementedError
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent."""
return isinstance(other, Domain) and self.dtype == other.dtype
def get_exact(self):
return self
def poly_ring(self, *symbols, **kwargs):
"""Returns a polynomial ring, i.e. `K[X]`."""
from ..polys import PolynomialRing
return PolynomialRing(self, symbols, kwargs.get('order', lex))
def frac_field(self, *symbols, **kwargs):
"""Returns a fraction field, i.e. `K(X)`."""
from ..polys import FractionField
return FractionField(self, symbols, kwargs.get('order', lex))
|
bsd-3-clause
| 1,690,075,486,520,412,000
| 32.104651
| 140
| 0.580026
| false
| 4.098369
| false
| false
| false
|
scrapinghub/python-hubstorage
|
tests/test_jobsmeta.py
|
1
|
3937
|
"""
Test job metadata
System tests for operations on stored job metadata
"""
from .hstestcase import HSTestCase
class JobsMetadataTest(HSTestCase):
def _assertMetadata(self, meta1, meta2):
def _clean(m):
return dict((k, v) for k, v in m.items() if k != 'updated_time')
meta1 = _clean(meta1)
meta2 = _clean(meta2)
self.assertEqual(meta1, meta2)
def test_basic(self):
job = self.project.push_job(self.spidername)
self.assertTrue('auth' not in job.metadata)
self.assertTrue('state' in job.metadata)
self.assertEqual(job.metadata['spider'], self.spidername)
# set some metadata and forget it
job.metadata['foo'] = 'bar'
self.assertEqual(job.metadata['foo'], 'bar')
job.metadata.expire()
self.assertTrue('foo' not in job.metadata)
# set it again and persist it
job.metadata['foo'] = 'bar'
self.assertEqual(job.metadata['foo'], 'bar')
job.metadata.save()
self.assertEqual(job.metadata['foo'], 'bar')
job.metadata.expire()
self.assertEqual(job.metadata['foo'], 'bar')
# refetch the job and compare its metadata
job2 = self.hsclient.get_job(job.key)
self._assertMetadata(job2.metadata, job.metadata)
# delete foo but do not persist it
del job.metadata['foo']
self.assertTrue('foo' not in job.metadata)
job.metadata.expire()
self.assertEqual(job.metadata.get('foo'), 'bar')
# persist it to be sure it is not removed
job.metadata.save()
self.assertEqual(job.metadata.get('foo'), 'bar')
# and finally delete again and persist it
del job.metadata['foo']
self.assertTrue('foo' not in job.metadata)
job.metadata.save()
self.assertTrue('foo' not in job.metadata)
job.metadata.expire()
self.assertTrue('foo' not in job.metadata)
job2 = self.hsclient.get_job(job.key)
self._assertMetadata(job.metadata, job2.metadata)
def test_updating(self):
job = self.project.push_job(self.spidername)
self.assertIsNone(job.metadata.get('foo'))
job.update_metadata({'foo': 'bar'})
# metadata attr should change
self.assertEqual(job.metadata.get('foo'), 'bar')
# as well as actual metadata
job = self.project.get_job(job.key)
self.assertEqual(job.metadata.get('foo'), 'bar')
job.update_metadata({'foo': None})
self.assertFalse(job.metadata.get('foo', False))
# there are ignored fields like: auth, _key, state
state = job.metadata['state']
job.update_metadata({'state': 'running'})
self.assertEqual(job.metadata['state'], state)
def test_representation(self):
job = self.project.push_job(self.spidername)
meta = job.metadata
self.assertNotEqual(str(meta), repr(meta))
self.assertEqual(meta, eval(str(meta)))
self.assertTrue(meta.__class__.__name__ in repr(meta))
self.assertFalse(meta.__class__.__name__ in str(meta))
def test_jobauth(self):
job = self.project.push_job(self.spidername)
self.assertIsNone(job.jobauth)
self.assertEqual(job.auth, self.project.auth)
self.assertEqual(job.items.auth, self.project.auth)
samejob = self.hsclient.get_job(job.key)
self.assertIsNone(samejob.auth)
self.assertIsNone(samejob.jobauth)
self.assertEqual(samejob.items.auth, self.project.auth)
def test_authtoken(self):
pendingjob = self.project.push_job(self.spidername)
runningjob = self.start_job()
self.assertEqual(pendingjob.key, runningjob.key)
self.assertTrue(runningjob.jobauth)
self.assertEqual(runningjob.jobauth, runningjob.auth)
self.assertEqual(runningjob.auth[0], runningjob.key)
self.assertTrue(runningjob.auth[1])
|
bsd-3-clause
| -4,820,492,767,777,208,000
| 36.495238
| 76
| 0.630937
| false
| 3.774688
| true
| false
| false
|
franapoli/pyleaf
|
pyleaf/rrc.py
|
1
|
5065
|
# The MIT License (MIT)
# Copyright (c) 2012-2013 Francesco Napolitano, franapoli@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from pyleaf import log
import pickle
import inspect
class resource():
def __init__(self, name, path):
log.send('Initializing resource ' + name + ' with path ' + path, 3)
self._name=name
self._path = path
if self.isDumped():
self.load()
def clear(self):
self._contents = None
self._fingerprint = None
def name(self):
return str(self)
def update(self):
if self.changed():
if self._fingerprint != None:
log.send(self.name() + ' has changed: updating.')
else:
log.send(self.name() + ' is new: building fingerprint.')
self.updateFingerprint()
self.dump()
else:
log.send(self.name() + ' has not changed.', 2)
def clearDump(self):
if self.isDumped():
os.remove(self._path)
def load(self):
if self.isDumped():
log.send(self.name() + ' is dumped in ' + self._path + ': loading it.')
res = pickle.load(open(self._path, 'rb'))
## Now it should be a "self = res" but I currently don't
## trust that.
self._timestamp = res._timestamp
self._buildtime = res._buildtime
self._fingerprint = res._fingerprint
self.setDumpPath(res.getDumpPath())
self.setIsFile(res.isFile())
self.setValue(res.getValue())
else:
log.send(self.name() + ' is not dumped.', 2)
def isDumped(self):
log.send('Checking ' + str(self) + ' in file: ' + self._path, 3)
if os.path.exists(self._path):
log.send('Available ' + str(self), 3)
return True
log.send('Unavailable: ' + str(self), 3)
return False
def dump(self):
if not self._dodump:
log.send('Dumping is switched off, so skipping.', 2)
return
log.send('Dumping resource: ' + self._name ,2)
log.send('object: ' + str(self), 3)
log.send('value: ' + str(self._contents), 3)
log.send('fingerprint: ' + str(self._fingerprint), 3)
log.send('Dumping to file: ' + self._path, 2)
pickle.dump(self, open(self._path, 'wb'))
def isAvailable(self):
return self._contents != None
def setValue(self, v):
log.send('New value is: ' + str(v), 3)
self._contents = v
def getValue(self):
return self._contents
def setIsFile(self, isit = True):
log.send('isFile value: ' + str(isit), 3)
self._isfile = isit
def isFile(self):
return self._isfile
def setDumpPath(self, path):
log.send('Updating path: ' + str(path),2)
self._path = path
def getDumpPath(self):
return self._path
def changed(self):
return self._fingerprint != self._makeFingerprint(self._contents)
def _makeFingerprint(self, obj):
try:
inspect.getsource(obj)
log.send('Source got:', 3)
log.send(inspect.getsource(obj), 3)
return inspect.getsource(obj)
except Exception:
log.send('No source: passing object', 3)
return obj
def getFingerprint(self):
return self._fingerprint
def updateFingerprint(self):
self._fingerprint = self._makeFingerprint(self._contents)
log.send('Fingerprint is: ' + str(self._fingerprint), 3)
def name(self):
return self._name
def setDump(self, d):
self._dodump = d
_name = ''
_contents = None
_dodump = True
_fingerprint = None
_path = None
_isfile = False
_timestamp = None
_buildtime = None
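# Hedged usage sketch (added for illustration, not part of the original
# module): a resource couples a value with a fingerprint so pyleaf can detect
# when it needs rebuilding; '/tmp/myres.dump' is a placeholder path.
#
# r = resource('myres', '/tmp/myres.dump')
# r.setValue(42)
# r.update()          # builds the fingerprint and dumps to /tmp/myres.dump
# print(r.changed())  # False until setValue() is called with a new value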
|
mit
| 4,220,487,148,200,134,000
| 31.261146
| 83
| 0.576308
| false
| 4.138072
| false
| false
| false
|
cjayb/mne-python
|
mne/preprocessing/ssp.py
|
3
|
14022
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from ..epochs import Epochs
from ..proj import compute_proj_evoked, compute_proj_epochs
from ..utils import logger, verbose, warn
from ..io.pick import pick_types
from ..io import make_eeg_average_ref_proj
from .ecg import find_ecg_events
from .eog import find_eog_events
def _safe_del_key(dict_, key):
"""Aux function.
Use this function when preparing rejection parameters
instead of directly deleting keys.
"""
if key in dict_:
del dict_[key]
def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs, ch_name,
reject, flat, bads, avg_ref, no_proj, event_id,
exg_l_freq, exg_h_freq, tstart, qrs_threshold,
filter_method, iir_params, return_drop_log, copy,
meg, verbose):
"""Compute SSP/PCA projections for ECG or EOG artifacts."""
raw = raw.copy() if copy else raw
del copy
raw.load_data() # we will filter it later
if no_proj:
projs = []
else:
projs = cp.deepcopy(raw.info['projs'])
logger.info('Including %d SSP projectors from raw file'
% len(projs))
if avg_ref:
eeg_proj = make_eeg_average_ref_proj(raw.info)
projs.append(eeg_proj)
if raw_event is None:
raw_event = raw
assert mode in ('ECG', 'EOG') # internal function
logger.info('Running %s SSP computation' % mode)
if mode == 'ECG':
events, _, _ = find_ecg_events(raw_event, ch_name=ch_name,
event_id=event_id, l_freq=exg_l_freq,
h_freq=exg_h_freq, tstart=tstart,
qrs_threshold=qrs_threshold,
filter_length=filter_length)
else: # mode == 'EOG':
events = find_eog_events(raw_event, event_id=event_id,
l_freq=exg_l_freq, h_freq=exg_h_freq,
filter_length=filter_length, ch_name=ch_name,
tstart=tstart)
# Check to make sure we actually got at least one usable event
if events.shape[0] < 1:
warn('No %s events found, returning None for projs' % mode)
return (None, events) + (([],) if return_drop_log else ())
logger.info('Computing projector')
my_info = cp.deepcopy(raw.info)
my_info['bads'] += bads
    # Handle rejection parameters
if reject is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eog')
if flat is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eog')
# exclude bad channels from projection
# keep reference channels if compensation channels are present
ref_meg = len(my_info['comps']) > 0
picks = pick_types(my_info, meg=True, eeg=True, eog=True, ecg=True,
ref_meg=ref_meg, exclude='bads')
raw.filter(l_freq, h_freq, picks=picks, filter_length=filter_length,
n_jobs=n_jobs, method=filter_method, iir_params=iir_params,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_design='firwin2')
epochs = Epochs(raw, events, None, tmin, tmax, baseline=None, preload=True,
picks=picks, reject=reject, flat=flat, proj=True)
drop_log = epochs.drop_log
if epochs.events.shape[0] < 1:
warn('No good epochs found, returning None for projs')
return (None, events) + ((drop_log,) if return_drop_log else ())
if average:
evoked = epochs.average()
ev_projs = compute_proj_evoked(evoked, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg, meg=meg)
else:
ev_projs = compute_proj_epochs(epochs, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg, n_jobs=n_jobs, meg=meg)
for p in ev_projs:
p['desc'] = mode + "-" + p['desc']
projs.extend(ev_projs)
logger.info('Done.')
return (projs, events) + ((drop_log,) if return_drop_log else ())
@verbose
def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=True, filter_length='10s', n_jobs=1,
ch_name=None, reject=dict(grad=2000e-13, mag=3000e-15,
eeg=50e-6, eog=250e-6),
flat=None, bads=[], avg_ref=False,
no_proj=False, event_id=999, ecg_l_freq=5, ecg_h_freq=35,
tstart=0., qrs_threshold='auto', filter_method='fir',
iir_params=None, copy=True, return_drop_log=False,
meg='separate', verbose=None):
"""Compute SSP/PCA projections for ECG artifacts.
.. note:: raw data will be loaded if it is not already.
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency for the data channels in Hz.
h_freq : float | None
Filter high cut-off frequency for the data channels in Hz.
average : bool
Compute SSP after averaging. Default is True.
filter_length : str | int | None
Number of taps to use for filtering.
%(n_jobs)s
ch_name : str | None
Channel to use for ECG detection (Required if no ECG found).
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
ecg_l_freq : float
Low pass frequency applied to the ECG channel for event detection.
ecg_h_freq : float
High pass frequency applied to the ECG channel for event detection.
tstart : float
Start artifact detection after tstart seconds.
qrs_threshold : float | str
Between 0 and 1. qrs detection threshold. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min).
filter_method : str
Method for filtering ('iir' or 'fir').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
return_drop_log : bool
If True, return the drop log.
.. versionadded:: 0.15
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
proj : list
Computed SSP projectors.
ecg_events : ndarray
Detected ECG events.
drop_log : list
The drop log, if requested.
See Also
--------
find_ecg_events
create_ecg_epochs
Notes
-----
Filtering is applied to the ECG channel while finding events using
``ecg_l_freq`` and ``ecg_h_freq``, and then to the ``raw`` instance
using ``l_freq`` and ``h_freq`` before creation of the epochs used to
create the projectors.
"""
return _compute_exg_proj(
'ECG', raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
l_freq, h_freq, average, filter_length, n_jobs, ch_name, reject, flat,
bads, avg_ref, no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart,
qrs_threshold, filter_method, iir_params, return_drop_log, copy,
meg, verbose)
@verbose
def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=True, filter_length='10s', n_jobs=1,
reject=dict(grad=2000e-13, mag=3000e-15, eeg=500e-6,
eog=np.inf), flat=None, bads=[],
avg_ref=False, no_proj=False, event_id=998, eog_l_freq=1,
eog_h_freq=10, tstart=0., filter_method='fir',
iir_params=None, ch_name=None, copy=True,
return_drop_log=False, meg='separate', verbose=None):
"""Compute SSP/PCA projections for EOG artifacts.
.. note:: raw data must be preloaded.
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency for the data channels in Hz.
h_freq : float | None
Filter high cut-off frequency for the data channels in Hz.
average : bool
Compute SSP after averaging. Default is True.
filter_length : str | int | None
Number of taps to use for filtering.
%(n_jobs)s
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
eog_l_freq : float
        Low pass frequency applied to the EOG channel for event detection.
eog_h_freq : float
High pass frequency applied to the EOG channel for event detection.
tstart : float
Start artifact detection after tstart seconds.
filter_method : str
Method for filtering ('iir' or 'fir').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
ch_name : str | None
If not None, specify EOG channel name.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
return_drop_log : bool
If True, return the drop log.
.. versionadded:: 0.15
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
proj: list
Computed SSP projectors.
eog_events: ndarray
Detected EOG events.
drop_log : list
The drop log, if requested.
See Also
--------
find_eog_events
create_eog_epochs
Notes
-----
Filtering is applied to the EOG channel while finding events using
``eog_l_freq`` and ``eog_h_freq``, and then to the ``raw`` instance
using ``l_freq`` and ``h_freq`` before creation of the epochs used to
create the projectors.
"""
return _compute_exg_proj(
'EOG', raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
l_freq, h_freq, average, filter_length, n_jobs, ch_name, reject, flat,
bads, avg_ref, no_proj, event_id, eog_l_freq, eog_h_freq, tstart,
'auto', filter_method, iir_params, return_drop_log, copy, meg,
verbose)
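# Hedged usage sketch (added for illustration, not part of the original
# module): typical calls assuming a preloaded Raw object; "sample_raw.fif" is
# a placeholder path.
#
# import mne
# raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)
# ecg_projs, ecg_events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=0)
# eog_projs, eog_events = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=0)
# raw.add_proj(ecg_projs + eog_projs)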
|
bsd-3-clause
| 1,329,410,272,909,505,500
| 37.726519
| 79
| 0.580569
| false
| 3.513534
| false
| false
| false
|
breakwang/pykit
|
jobq/jobq.py
|
1
|
3605
|
import logging
import sys
import threading
import time
import types
if sys.version_info[0] == 2:
import Queue
else:
import queue as Queue
logger = logging.getLogger(__name__)
class EmptyRst(object):
pass
class Finish(object):
pass
def run(input_it, workers, keep_order=False, timeout=None, probe=None):
endtime = time.time() + (timeout or 86400 * 365)
if probe is None:
probe = {}
sessions = []
probe['sessions'] = sessions
head_q = _make_q()
inq = head_q
for worker in workers + [_blackhole]:
if callable(worker):
worker = (worker, 1)
worker, n = worker
sess = {'worker': worker,
'threads': [],
'input': inq,
}
outq = _make_q()
if keep_order and n > 1:
# to maximize concurrency
sess['queue_of_outq'] = _make_q(n=1024 * 1024)
sess['lock'] = threading.RLock()
sess['coor_th'] = _thread(_coordinate, (sess, outq))
sess['threads'] = [_thread(_exec_in_order, (sess, _make_q()))
for ii in range(n)]
else:
sess['threads'] = [_thread(_exec, (sess, outq))
for ii in range(n)]
sessions.append(sess)
inq = outq
for args in input_it:
head_q.put(args)
for sess in sessions:
        # put one Finish sentinel per worker thread of this session
for th in sess['threads']:
sess['input'].put(Finish)
for th in sess['threads']:
th.join(endtime - time.time())
if 'queue_of_outq' in sess:
sess['queue_of_outq'].put(Finish)
sess['coor_th'].join(endtime - time.time())
def stat(probe):
rst = []
for sess in probe['sessions']:
o = {}
wk = sess['worker']
o['name'] = wk.__module__ + ":" + wk.__name__
o['input'] = _q_stat(sess['input'])
if 'queue_of_outq' in sess:
o['coordinator'] = _q_stat(sess['queue_of_outq'])
rst.append(o)
return rst
def _q_stat(q):
return {'size': q.qsize(),
'capa': q.maxsize
}
def _exec(sess, output_q):
while True:
args = sess['input'].get()
if args is Finish:
return
try:
rst = sess['worker'](args)
except Exception as e:
logger.exception(repr(e))
continue
_put_rst(output_q, rst)
def _exec_in_order(sess, output_q):
while True:
with sess['lock']:
args = sess['input'].get()
if args is Finish:
return
sess['queue_of_outq'].put(output_q)
try:
rst = sess['worker'](args)
except Exception as e:
logger.exception(repr(e))
output_q.put(EmptyRst)
continue
output_q.put(rst)
def _coordinate(sess, output_q):
while True:
outq = sess['queue_of_outq'].get()
if outq is Finish:
return
_put_rst(output_q, outq.get())
def _put_rst(output_q, rst):
if type(rst) == types.GeneratorType:
for rr in rst:
_put_non_empty(output_q, rr)
else:
_put_non_empty(output_q, rst)
def _blackhole(args):
return EmptyRst
def _put_non_empty(q, val):
if val is not EmptyRst:
q.put(val)
def _make_q(n=1024):
return Queue.Queue(n)
def _thread(func, args):
th = threading.Thread(target=func,
args=args)
th.daemon = True
th.start()
return th
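# Hedged usage sketch (added for illustration, not part of the original
# module): a two-stage pipeline built with run() above. With a single thread
# per stage the results arrive in input order.
if __name__ == '__main__':
    collected = []

    def _square(x):
        return x * x

    def _collect(x):
        collected.append(x)
        return EmptyRst

    run(range(5), [_square, _collect])
    print(collected)  # expected: [0, 1, 4, 9, 16]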
|
mit
| 250,652,157,187,175,260
| 19.027778
| 73
| 0.5043
| false
| 3.565776
| false
| false
| false
|
robert-b-clarke/nre-darwin-py
|
nredarwin/cli.py
|
1
|
1612
|
import argparse
from nredarwin.webservice import DarwinLdbSession
import csv
import sys
from tabulate import tabulate
from functools import partial
def rows_to_display(station_board):
"""
Iterator for tabular output of board
"""
yield (("Platform", "Destination", "Scheduled", "Due"))
for service in station_board.train_services:
yield (
service.platform,
service.destination_text,
service.std,
service.etd,
)
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"station", type=str, help="station CRS code, e.g. MAN for Manchester Piccadilly"
)
ap.add_argument(
"--destination",
type=str,
required=False,
help="Only include services travelling to this CRS code, e.g HUD",
)
ap.add_argument("--csv", action="store_true", help="output in csv format")
args = ap.parse_args()
darwin_session = DarwinLdbSession(
wsdl="https://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx"
)
# build up query
board_query = partial(darwin_session.get_station_board, args.station)
if args.destination:
board_query = partial(board_query, destination_crs=args.destination)
# convert to tabular data for display
board_rows = rows_to_display(board_query())
# output CSV if requested
if args.csv:
output_writer = csv.writer(sys.stdout, dialect="unix")
output_writer.writerows(board_rows)
return
# Otherwise output human readable table
print(tabulate(board_rows, headers="firstrow"))
|
bsd-3-clause
| 8,184,569,043,009,174,000
| 28.851852
| 88
| 0.651985
| false
| 3.801887
| false
| false
| false
|
GraphProcessor/CommunityDetectionCodes
|
Algorithms/2014-Heat-Kernel/src_python/demo_files/yche_numerical_linear_algebra_exp.py
|
1
|
2192
|
from __future__ import print_function
import numpy as np
def demo_gauss_seidel_method():
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0.0, 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
# prints the system
print("System:")
for i in range(A.shape[0]):
row = ["{}*x{}".format(A[i, j], j + 1) for j in range(A.shape[1])]
print(" + ".join(row), "=", b[i])
print()
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x_new[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, rtol=1e-8):
break
x = x_new
print("Solution:")
print(x)
error = np.dot(A, x) - b
print("Error:")
print(error)
def demo_jacobi_method():
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0.0, 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
# prints the system
print("System:")
for i in range(A.shape[0]):
row = ["{}*x{}".format(A[i, j], j + 1) for j in range(A.shape[1])]
print(" + ".join(row), "=", b[i])
print()
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, atol=1e-10):
break
x = x_new
print("Solution:")
print(x)
error = np.dot(A, x) - b
print("Error:")
print(error)
if __name__ == '__main__':
    demo_gauss_seidel_method()
demo_jacobi_method()
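# Hedged cross-check (added for illustration, not part of the original demos):
# for this small, well-conditioned system NumPy's direct solver should agree
# with both iterative methods above.
def demo_direct_solve():
    A = np.array([[10., -1., 2., 0.],
                  [-1., 11., -1., 3.],
                  [2., -1., 10., -1.],
                  [0.0, 3., -1., 8.]])
    b = np.array([6., 25., -11., 15.])
    print("Direct solution:", np.linalg.solve(A, b))


if __name__ == '__main__':
    demo_direct_solve()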
|
gpl-2.0
| -273,464,835,275,803,520
| 24.488372
| 74
| 0.447993
| false
| 2.907162
| false
| false
| false
|
centrofermi/e3pipe
|
dst/E3DstWeatherTree.py
|
1
|
1873
|
#!/usr/bin/env python
# *********************************************************************
# * Copyright (C) 2014 Luca Baldini (luca.baldini@pi.infn.it) *
# * *
# * For the license terms see the file LICENSE, distributed *
# * along with this software. *
# *********************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from e3pipe.root.E3Tree import E3Tree
from e3pipe.root.E3BranchDescriptor import E3BranchDescriptor
class E3DstWeatherTree(E3Tree):
""" Class describing the ROOT tree containing the run-by-run header
information.
"""
NAME = 'Weather'
BRANCHES = [E3BranchDescriptor('Seconds', 'i'),
E3BranchDescriptor('IndoorTemperature', 'F'),
E3BranchDescriptor('OutdoorTemperature', 'F'),
E3BranchDescriptor('Pressure', 'F')
]
def __init__(self):
""" Constructor.
"""
E3Tree.__init__(self, 'Weather tree')
def test():
"""
"""
tree = E3DstWeatherTree()
if __name__ == '__main__':
test()
|
gpl-3.0
| -5,012,376,391,257,592,000
| 32.446429
| 73
| 0.577149
| false
| 4.286041
| false
| false
| false
|
borgarlie/TDT4501-Specialization-Project
|
research/calculate_accuracy.py
|
1
|
2242
|
import numpy as np
from tensorboardX import SummaryWriter
from seq2seq_summarization.globals import *
from classifier.train_classifier import get_predictions, calculate_accuracy, create_single_article_category_list
from research.train import split_category_and_article, category_from_string, evaluate
def test_accuracy(config, articles, vocabulary, encoder, decoder, classifier, max_length):
print("Testing accuracy", flush=True)
writer = SummaryWriter('../log/test_accuracy1')
categories_total = []
categories_scores_total = []
print("Generating beams", flush=True)
for i in range(len(articles)):
print("Evaluating article nr: %d" % i, flush=True)
category, input_sentence = split_category_and_article(articles[i])
category = category.strip()
category_variable = category_from_string(category)
categories = [category_variable]
categories_var = Variable(torch.FloatTensor(categories))
if use_cuda:
categories_var = categories_var.cuda()
output_beams = evaluate(config, vocabulary, encoder, decoder, input_sentence, categories_var, max_length)
top1_beam = output_beams[0]
top1_sequence_output = top1_beam.decoded_word_sequence
output_sentence = ' '.join(top1_sequence_output[:-1])
sequence = indexes_from_sentence(vocabulary, output_sentence)
sequence = Variable(torch.LongTensor([sequence]))
if use_cuda:
sequence = sequence.cuda()
category = create_single_article_category_list(category)
categories_total.append(category)
categories_scores = get_category_scores(sequence, classifier)
categories_scores_total.append(categories_scores)
print("Calculating accuracy", flush=True)
np_gold_truth = np.array(categories_total)
print(np.shape(np_gold_truth), flush=True)
np_predicted = get_predictions(categories_scores_total, 0.00)
print(np.shape(np_predicted), flush=True)
epoch = 999 # random
calculate_accuracy(np_gold_truth, np_predicted, writer, epoch)
def get_category_scores(sequence, classifier):
categories_scores = classifier(sequence, mode='Test')
return categories_scores.data.cpu().numpy()[0]
|
mit
| -9,058,002,817,343,573,000
| 39.035714
| 113
| 0.702498
| false
| 3.975177
| false
| false
| false
|
icebreaker/dotfiles
|
gnome/gnome2/gedit/plugins.symlink/classbrowser/parser_ruby.py
|
1
|
14401
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Frederic Back (fredericback@gmail.com)
# Copyright (C) 2007 Kristoffer Lundén (kristoffer.lunden@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
import gtk
import gobject
import pango
import os
import re
import options
from parserinterface import ClassParserInterface
import imagelibrary
#===============================================================================
def tokenFromString(string):
""" Parse a string containing a function or class definition and return
        a Token object describing it, or None if the
parsing failed.
Example:
"#def foo(bar):" would return :
{'comment':True,'type':"def",'name':"foo",'params':"bar" } """
try:
e = r"([# ]*?)([a-zA-Z0-9_]+)( +)([a-zA-Z0-9_\?\!<>\+=\.]+)(.*)"
r = re.match(e,string).groups()
token = Token()
token.comment = '#' in r[0]
token.type = r[1]
token.name = r[3]
token.params = r[4]
token.original = string
return token
except: return None # return None to skip if unable to parse
def test():
pass
#===============================================================================
class Token:
def __init__(self):
self.type = None
self.original = None # the line in the file, unparsed
self.indent = 0
self.name = None
self.comment = False # if true, the token is commented, ie. inactive
self.params = None # string containing additional info
self.expanded = False
self.access = "public"
# start and end points
self.start = 0
self.end = 0
self.rubyfile = None
self.path = None # save the position in the browser
self.parent = None
self.children = []
def get_endline(self):
""" Get the line number where this token's declaration, including all
its children, finishes. Use it for copy operations."""
if len(self.children) > 0:
return self.children[-1].get_endline()
return self.end
def test_nested():
pass
def get_toplevel_class(self):
""" Try to get the class a token is in. """
if self.type == "class":
return self
if self.parent is not None:
tc = self.parent.get_toplevel_class()
if tc is None or tc.type == "file": return self #hack
else: return tc
return None
def printout(self):
for r in range(self.indent): print "",
print self.name,
if self.parent: print " (parent: ",self.parent.name
else: print
for tok in self.children: tok.printout()
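# Hedged self-check (added for illustration; not part of the original gedit
# plugin): exercises tokenFromString() on the docstring example above. It only
# runs when the module is executed directly, so plugin behaviour is unchanged.
if __name__ == "__main__":
    _tok = tokenFromString("#def foo(bar):")
    assert _tok is not None and _tok.comment
    assert _tok.type == "def" and _tok.name == "foo"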
#===============================================================================
class RubyFile(Token):
""" A class that represents a ruby file.
Manages "tokens", ie. classes and functions."""
def __init__(self, doc):
Token.__init__(self)
self.doc = doc
self.uri = doc.get_uri()
self.linestotal = 0 # total line count
self.type = "file"
self.name = os.path.basename(self.uri)
self.tokens = []
def getTokenAtLine(self, line):
""" get the token at the specified line number """
for token in self.tokens:
if token.start <= line and token.end > line:
return self.__findInnermostTokenAtLine(token, line)
return None
def __findInnermostTokenAtLine(self, token, line):
"""" ruby is parsed as nested, unlike python """
for child in token.children:
if child.start <= line and child.end > line:
return self.__findInnermostTokenAtLine(child, line)
return token
def parse(self, verbose=True):
#if verbose: print "parse ----------------------------------------------"
newtokenlist = []
self.children = []
currentParent = self
self.linestotal = self.doc.get_line_count()
text = self.doc.get_text(*self.doc.get_bounds())
linecount = -1
ends_to_skip = 0
access = "public"
for line in text.splitlines():
linecount += 1
lstrip = line.lstrip()
ln = lstrip.split()
if len(ln) == 0: continue
if ln[0] == '#': continue
if ln[0] in ("class","module","def"):
token = tokenFromString(lstrip)
if token is None: continue
token.rubyfile = self
token.start = linecount
if token.type == "def":
token.access = access
#print "line",linecount
#print "name", token.name
#print "type",token.type
#print "access",token.access
#print "to",currentParent.name
currentParent.children.append(token)
token.parent = currentParent
currentParent = token
newtokenlist.append(token)
idx = len(newtokenlist) - 1
if idx < len(self.tokens):
if newtokenlist[idx].original == self.tokens[idx].original:
newtokenlist[idx].expanded = self.tokens[idx].expanded
elif ln[0] in("begin","while","until","case","if","unless","for"):
ends_to_skip += 1
elif ln[0] in ("attr_reader","attr_writer","attr_accessor"):
for attr in ln:
m = re.match(r":(\w+)",attr)
if m:
token = Token()
token.rubyfile = self
token.type = 'def'
token.name = m.group(1)
token.start = linecount
token.end = linecount
token.original = lstrip
currentParent.children.append(token)
token.parent = currentParent
newtokenlist.append(token)
elif re.search(r"\sdo(\s+\|.*?\|)?\s*(#|$)", line):
#print "do",line
# Support for new style RSpec
if re.match(r"^(describe|it|before|after)\b", ln[0]):
token = Token()
token.rubyfile = self
token.start = linecount
if currentParent.type == "describe":
if ln[0] == "it":
token.name = " ".join(ln[1:-1])
else:
token.name = ln[0]
token.type = "def"
elif ln[0] == "describe":
token.type = "describe"
token.name = " ".join(ln[1:-1])
else:
continue
currentParent.children.append(token)
token.parent = currentParent
currentParent = token
newtokenlist.append(token)
                # Deprecated support for old style RSpec, will be removed later
elif ln[0] in ("context","specify","setup","teardown","context_setup","context_teardown"):
token = Token()
token.rubyfile = self
token.start = linecount
if currentParent.type == "context":
if ln[0] == "specify":
token.name = " ".join(ln[1:-1])
else:
token.name = ln[0]
token.type = "def"
elif ln[0] == "context":
token.type = "context"
token.name = " ".join(ln[1:-1])
else:
continue
currentParent.children.append(token)
token.parent = currentParent
currentParent = token
newtokenlist.append(token)
else:
ends_to_skip += 1
elif ln[0] in ("public","private","protected"):
if len(ln) == 1:
access = ln[0]
if re.search(r";?\s*end(?:\s*$|\s+(?:while|until))", line):
if ends_to_skip > 0:
ends_to_skip -= 1
else:
token = currentParent
#print "end",currentParent.name
token.end = linecount
currentParent = token.parent
# set new token list
self.tokens = newtokenlist
return True
#===============================================================================
class RubyParser( ClassParserInterface ):
def __init__(self):
self.rubyfile = None
def appendTokenToBrowser(self, token, parentit ):
it = self.__browsermodel.append(parentit,(token,))
token.path = self.__browsermodel.get_path(it)
#print token.path
#if token.parent:
# if token.parent.expanded:
# self.browser.expand_row(token.parent.path,False)
# pass
for child in token.children:
self.appendTokenToBrowser(child, it)
def parse(self, doc):
"""
Create a gtk.TreeModel with the class elements of the document
The parser uses the ctags command from the shell to create a ctags file,
then parses the file, and finally populates a treemodel.
"""
self.rubyfile = RubyFile(doc)
self.rubyfile.parse(options.singleton().verbose)
self.__browsermodel = gtk.TreeStore(gobject.TYPE_PYOBJECT)
for child in self.rubyfile.children:
self.appendTokenToBrowser(child,None)
return self.__browsermodel
def __private_test_method(self):
pass
def get_tag_position(self, model, path):
tok = model.get_value( model.get_iter(path), 0 )
try: return tok.rubyfile.uri, tok.start+1
except: return None
def current_line_changed(self, model, doc, line):
# parse again if line count changed
if abs(self.rubyfile.linestotal - doc.get_line_count()) > 0:
if abs(self.rubyfile.linestotal - doc.get_line_count()) > 5:
if options.singleton().verbose:
print "RubyParser: refresh because line dif > 5"
self.rubyfile.parse()
else:
it = doc.get_iter_at_line(line)
a = it.copy(); b = it.copy()
a.backward_line(); a.backward_line()
b.forward_line(); b.forward_line()
t = doc.get_text(a,b)
if t.find("class") >= 0 or t.find("def") >= 0:
if options.singleton().verbose:
print "RubyParser: refresh because line cound changed near keyword"
self.rubyfile.parse()
def get_tag_at_line(self, model, doc, linenumber):
t = self.rubyfile.getTokenAtLine(linenumber)
#print linenumber,t
if t: return t.path
def cellrenderer(self, column, ctr, model, it):
""" Render the browser cell according to the token it represents. """
tok = model.get_value(it,0)
weight = 400
style = pango.STYLE_NORMAL
name = tok.name#+tok.params
colour = options.singleton().colours[ "function" ]
# set label and colour
if tok.type == "class":
name = "class "+name
colour = options.singleton().colours[ "class" ]
weight = 600
elif tok.type == "module":
name = "module "+name
colour = options.singleton().colours[ "namespace" ]
weight = 600
# new style RSpec
elif tok.type == "describe":
name = "describe "+name
colour = options.singleton().colours[ "namespace" ]
weight = 600
# Old style RSpec, deprecated
elif tok.type == "context":
name = "context "+name
colour = options.singleton().colours[ "namespace" ]
weight = 600
elif tok.type == "def":
colour = options.singleton().colours[ "member" ]
if tok.comment: name = "#"+name
        # assign properties
ctr.set_property("text", name)
ctr.set_property("style", style)
ctr.set_property("foreground-gdk", colour)
def pixbufrenderer(self, column, crp, model, it):
tok = model.get_value(it,0)
icon = "default"
if tok.type == "class":
icon = "class"
elif tok.type == "module":
icon = "namespace"
elif tok.type == "describe":
icon = "namespace"
elif tok.type == "context":
icon = "namespace"
elif tok.type == "def":
if tok.access == "public":
icon = "method"
elif tok.access == "protected":
icon = "method_prot"
elif tok.access == "private":
icon = "method_priv"
crp.set_property("pixbuf",imagelibrary.pixbufs[icon])
|
mit
| 4,563,122,297,649,822,700
| 33.615385
| 106
| 0.491319
| false
| 4.532578
| false
| false
| false
|
rho2/30DaysOfCode
|
day18.py
|
1
|
1039
|
import sys
class Solution:
def __init__(self):
self.stack = list()
self.queue = list()
def pushCharacter(self, char):
self.stack.append(char)
def popCharacter(self):
return(self.stack.pop(-1))
def enqueueCharacter(self, char):
self.queue.append(char)
def dequeueCharacter(self):
return(self.queue.pop(0))
# read the string s
s=input()
#Create the Solution class object
obj=Solution()
l=len(s)
# push/enqueue all the characters of string s to stack
for i in range(l):
obj.pushCharacter(s[i])
obj.enqueueCharacter(s[i])
isPalindrome=True
'''
pop the top character from stack
dequeue the first character from queue
compare both the characters
'''
for i in range(l // 2):
if obj.popCharacter()!=obj.dequeueCharacter():
isPalindrome=False
break
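# Hedged aside (added for illustration, not part of the original exercise):
# the stack/queue comparison above is equivalent to a direct slice check.
assert isPalindrome == (s == s[::-1])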
#finally print whether string s is palindrome or not.
if isPalindrome:
print("The word, "+s+", is a palindrome.")
else:
print("The word, "+s+", is not a palindrome.")
|
mit
| -2,207,952,508,544,627,000
| 22.088889
| 54
| 0.647738
| false
| 3.486577
| false
| false
| false
|
sealevelresearch/tide-wrangler
|
tide_wrangler/scratch/convert_garston_to_csv.py
|
1
|
1918
|
#!/usr/bin/env python
import pytz
import datetime
import csv
from os import path
from collections import namedtuple
Row = namedtuple('Row', 'when,height_m')
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
_NAIVE_DATETIME_FORMAT = '%y%m%dT%H:%M:00Z'
_FIELDNAMES = ['datetime', 'observed_sea_level']
def main(filenames):
for in_filename in filenames:
csv_filename = make_output_filename(in_filename)
date_str = get_date_str_from_input_filename(in_filename)
convert_file(in_filename, csv_filename, date_str)
def make_output_filename(in_filename):
"""
>>> make_output_filename('/tmp/data.GAR')
'/tmp/data.GAR.csv'
"""
return in_filename + '.csv'
def get_date_str_from_input_filename(in_filename):
"""
>>> get_date_str_from_input_filename('/tmp/13120610.GAR')
'131206'
"""
return path.basename(path.splitext(in_filename)[0])[:-2]
def convert_file(in_filename, csv_filename, date_str):
with open(in_filename, 'r') as f, open(csv_filename, 'w') as g:
csvreader = csv.DictReader(f, fieldnames=_FIELDNAMES)
csvwriter = csv.DictWriter(g, fieldnames=_FIELDNAMES)
csvwriter.writeheader()
count = 1
for line in csvreader:
row = parse_line(line, date_str)
csvwriter.writerow(
{_FIELDNAMES[0]: row[0].strftime(DATETIME_FORMAT),
_FIELDNAMES[1]: row[1]})
count += 1
if count % 50000 == 0:
print(count)
print('Converted {} lines.'.format(count))
def parse_line(line, date_str):
datetime_str = date_str + "T" + line["datetime"] + "Z"
when = datetime.datetime.strptime(
datetime_str, _NAIVE_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
height_m = float(line["observed_sea_level"])
return [when, height_m]
if __name__ == '__main__':
import sys
filenames = sys.argv[1:]
main(filenames)
|
mit
| -227,173,322,786,278,400
| 26.797101
| 70
| 0.616788
| false
| 3.312608
| false
| false
| false
|
z-plot/z-plot
|
examples/barplots/manybars.py
|
1
|
1257
|
#! /usr/bin/env python
import sys
from zplot import *
bartypes = [('hline', 1, 1),
('vline', 1, 1),
('hvline', 1, 1),
('dline1', 1, 2),
('dline2', 1, 2),
('dline12', 0.5, 2),
('circle', 1, 2),
('square', 1, 1),
('triangle', 2, 2),
('utriangle', 2, 2)]
L = len(bartypes)
ctype = 'eps' if len(sys.argv) < 2 else sys.argv[1]
c = canvas(ctype, title='manybars', dimensions=[L*10, 110])
print(c.version)
d = drawable(canvas=c, xrange=[0,L+1], yrange=[0,10], coord=[0,5],
dimensions=[L*10,100])
t = table(file='manybars.data')
p = plotter()
for btype, fsize, fskip in bartypes:
p.verticalbars(drawable=d, table=t, xfield='c0', yfield='c1', fill=True,
fillcolor='darkgray', fillstyle=btype, barwidth=0.9,
fillsize=fsize, fillskip=fskip)
t.update(set='c0=c0+1')
c.render()
|
bsd-3-clause
| -2,772,117,638,909,082,600
| 22.716981
| 76
| 0.461416
| false
| 2.916473
| false
| false
| false
|
binhqnguyen/lena
|
test.py
|
1
|
75970
|
#! /usr/bin/env python26
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2009 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import sys
import time
import optparse
import subprocess
import threading
import Queue
import signal
import xml.dom.minidom
import shutil
import re
from utils import get_list_from_file
#
# XXX This should really be part of a waf command to list the configuration
# items relative to optional ns-3 pieces.
#
# A list of interesting configuration items in the waf configuration
# cache which we may be interested in when deciding on which examples
# to run and how to run them. These are set by waf during the
# configuration phase and the corresponding assignments are usually
# found in the associated subdirectory wscript files.
#
interesting_config_items = [
"NS3_ENABLED_MODULES",
"NS3_MODULE_PATH",
"NSC_ENABLED",
"ENABLE_REAL_TIME",
"ENABLE_THREADING",
"ENABLE_EXAMPLES",
"ENABLE_TESTS",
"EXAMPLE_DIRECTORIES",
"ENABLE_PYTHON_BINDINGS",
"ENABLE_CLICK",
"ENABLE_BRITE",
"ENABLE_OPENFLOW",
"APPNAME",
"BUILD_PROFILE",
"VERSION",
"PYTHON",
"VALGRIND_FOUND",
]
NSC_ENABLED = False
ENABLE_REAL_TIME = False
ENABLE_THREADING = False
ENABLE_EXAMPLES = True
ENABLE_TESTS = True
ENABLE_CLICK = False
ENABLE_BRITE = False
ENABLE_OPENFLOW = False
EXAMPLE_DIRECTORIES = []
APPNAME = ""
BUILD_PROFILE = ""
BUILD_PROFILE_SUFFIX = ""
VERSION = ""
PYTHON = ""
VALGRIND_FOUND = True
#
# This will be given a prefix and a suffix when the waf config file is
# read.
#
test_runner_name = "test-runner"
#
# If the user has constrained us to run certain kinds of tests, we can tell waf
# to only build
#
core_kinds = ["bvt", "core", "performance", "system", "unit"]
#
# There are some special cases for test suites that kill valgrind. This is
# because NSC causes illegal instruction crashes when run under valgrind.
#
core_valgrind_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
"routing-click",
"lte-rr-ff-mac-scheduler",
"lte-tdmt-ff-mac-scheduler",
"lte-fdmt-ff-mac-scheduler",
"lte-pf-ff-mac-scheduler",
"lte-tta-ff-mac-scheduler",
"lte-fdbet-ff-mac-scheduler",
"lte-ttbet-ff-mac-scheduler",
"lte-fdtbfq-ff-mac-scheduler",
"lte-tdtbfq-ff-mac-scheduler",
"lte-pss-ff-mac-scheduler",
]
#
# There are some special cases for test suites that fail when NSC is
# missing.
#
core_nsc_missing_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
]
#
# Parse the examples-to-run file if it exists.
#
# This function adds any C++ examples or Python examples that are to be run
# to the lists in example_tests and python_tests, respectively.
#
def parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests):
# Look for the examples-to-run file exists.
if os.path.exists(examples_to_run_path):
# Each tuple in the C++ list of examples to run contains
#
# (example_name, do_run, do_valgrind_run)
#
# where example_name is the executable to be run, do_run is a
# condition under which to run the example, and do_valgrind_run is
# a condition under which to run the example under valgrind. This
# is needed because NSC causes illegal instruction crashes with
# some tests when they are run under valgrind.
#
# Note that the two conditions are Python statements that
# can depend on waf configuration variables. For example,
#
# ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#
cpp_examples = get_list_from_file(examples_to_run_path, "cpp_examples")
for example_name, do_run, do_valgrind_run in cpp_examples:
            # Separate the example name from its arguments.
example_name_original = example_name
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
example_name = "%s%s-%s%s" % (APPNAME, VERSION, example_name, BUILD_PROFILE_SUFFIX)
# Set the full path for the example.
example_path = os.path.join(cpp_executable_dir, example_name)
# Add all of the C++ examples that were built, i.e. found
# in the directory, to the list of C++ examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
# Add this example.
example_tests.append((example_path, do_run, do_valgrind_run))
example_names_original.append(example_name_original)
# Each tuple in the Python list of examples to run contains
#
# (example_name, do_run)
#
# where example_name is the Python script to be run and
# do_run is a condition under which to run the example.
#
# Note that the condition is a Python statement that can
# depend on waf configuration variables. For example,
#
# ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#
python_examples = get_list_from_file(examples_to_run_path, "python_examples")
for example_name, do_run in python_examples:
            # Separate the example name from its arguments.
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Set the full path for the example.
example_path = os.path.join(python_script_dir, example_name)
# Add all of the Python examples that were found to the
# list of Python examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
# Add this example.
python_tests.append((example_path, do_run))
#
# The test suites are going to want to output status. They are running
# concurrently. This means that unless we are careful, the output of
# the test suites will be interleaved. Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then
# put together the final output file when we "join" the test tasks back
# to the main thread. In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
TMP_OUTPUT_DIR = "testpy-output"
def read_test(test):
result = test.find('Result').text
name = test.find('Name').text
if not test.find('Time') is None:
time_real = test.find('Time').get('real')
else:
time_real = ''
return (result, name, time_real)
#
# A simple example of writing a text file with a test result summary. It is
# expected that this output will be fine for developers looking for problems.
#
def node_to_text (test, f):
(result, name, time_real) = read_test(test)
output = "%s: Test Suite \"%s\" (%s)\n" % (result, name, time_real)
f.write(output)
for details in test.findall('FailureDetails'):
f.write(" Details:\n")
f.write(" Message: %s\n" % details.find('Message').text)
f.write(" Condition: %s\n" % details.find('Condition').text)
f.write(" Actual: %s\n" % details.find('Actual').text)
f.write(" Limit: %s\n" % details.find('Limit').text)
f.write(" File: %s\n" % details.find('File').text)
f.write(" Line: %s\n" % details.find('Line').text)
for child in test.findall('Test'):
node_to_text(child, f)
def translate_to_text(results_file, text_file):
f = open(text_file, 'w')
import xml.etree.ElementTree as ET
et = ET.parse (results_file)
for test in et.findall('Test'):
node_to_text (test, f)
for example in et.findall('Example'):
result = example.find('Result').text
name = example.find('Name').text
if not example.find('Time') is None:
time_real = example.find('Time').get('real')
else:
time_real = ''
output = "%s: Example \"%s\" (%s)\n" % (result, name, time_real)
f.write(output)
f.close()
#
# A simple example of writing an HTML file with a test result summary. It is
# expected that this will eventually be made prettier as time progresses and
# we have time to tweak it. This may end up being moved to a separate module
# since it will probably grow over time.
#
def translate_to_html(results_file, html_file):
f = open(html_file, 'w')
f.write("<html>\n")
f.write("<body>\n")
f.write("<center><h1>ns-3 Test Results</h1></center>\n")
#
# Read and parse the whole results file.
#
import xml.etree.ElementTree as ET
et = ET.parse(results_file)
#
# Iterate through the test suites
#
f.write("<h2>Test Suites</h2>\n")
for suite in et.findall('Test'):
#
# For each test suite, get its name, result and execution time info
#
(result, name, time) = read_test (suite)
#
# Print a level three header with the result, name and time. If the
# test suite passed, the header is printed in green. If the suite was
# skipped, print it in orange, otherwise assume something bad happened
# and print in red.
#
if result == "PASS":
f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
elif result == "SKIP":
f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
else:
f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))
#
# The test case information goes in a table.
#
f.write("<table border=\"1\">\n")
#
# The first column of the table has the heading Result
#
f.write("<th> Result </th>\n")
#
# If the suite crashed or is skipped, there is no further information, so just
        # declare a new table row with the result (CRASH or SKIP) in it. Looks like:
#
# +--------+
# | Result |
# +--------+
# | CRASH |
# +--------+
#
# Then go on to the next test suite. Valgrind and skipped errors look the same.
#
if result in ["CRASH", "SKIP", "VALGR"]:
f.write("<tr>\n")
if result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("</tr>\n")
f.write("</table>\n")
continue
#
# If the suite didn't crash, we expect more information, so fill out
# the table heading row. Like,
#
# +--------+----------------+------+
# | Result | Test Case Name | Time |
# +--------+----------------+------+
#
f.write("<th>Test Case Name</th>\n")
f.write("<th> Time </th>\n")
#
# If the test case failed, we need to print out some failure details
# so extend the heading row again. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
#
if result == "FAIL":
f.write("<th>Failure Details</th>\n")
#
# Now iterate through all of the test cases.
#
for case in suite.findall('Test'):
#
# Get the name, result and timing information from xml to use in
# printing table below.
#
(result, name, time) = read_test(case)
#
# If the test case failed, we iterate through possibly multiple
# failure details
#
if result == "FAIL":
#
# There can be multiple failures for each test case. The first
# row always gets the result, name and timing information along
# with the failure details. Remaining failures don't duplicate
# this information but just get blanks for readability. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
# | FAIL | The name | time | It's busted |
# +--------+----------------+------+-----------------+
# | | | | Really broken |
# +--------+----------------+------+-----------------+
# | | | | Busted bad |
# +--------+----------------+------+-----------------+
#
first_row = True
for details in case.findall('FailureDetails'):
#
# Start a new row in the table for each possible Failure Detail
#
f.write("<tr>\n")
if first_row:
first_row = False
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
else:
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td>")
f.write("<b>Message: </b>%s, " % details.find('Message').text)
f.write("<b>Condition: </b>%s, " % details.find('Condition').text)
f.write("<b>Actual: </b>%s, " % details.find('Actual').text)
f.write("<b>Limit: </b>%s, " % details.find('Limit').text)
f.write("<b>File: </b>%s, " % details.find('File').text)
f.write("<b>Line: </b>%s" % details.find('Line').text)
f.write("</td>\n")
#
# End the table row
#
f.write("</td>\n")
else:
#
# If this particular test case passed, then we just print the PASS
# result in green, followed by the test case name and its execution
# time information. These go off in <td> ... </td> table data.
# The details table entry is left blank.
#
# +--------+----------------+------+---------+
# | Result | Test Case Name | Time | Details |
# +--------+----------------+------+---------+
# | PASS | The name | time | |
# +--------+----------------+------+---------+
#
f.write("<tr>\n")
f.write("<td style=\"color:green\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
f.write("<td></td>\n")
f.write("</tr>\n")
#
# All of the rows are written, so we need to end the table.
#
f.write("</table>\n")
#
# That's it for all of the test suites. Now we have to do something about
# our examples.
#
f.write("<h2>Examples</h2>\n")
#
# Example status is rendered in a table just like the suites.
#
f.write("<table border=\"1\">\n")
#
# The table headings look like,
#
# +--------+--------------+--------------+
# | Result | Example Name | Elapsed Time |
# +--------+--------------+--------------+
#
f.write("<th> Result </th>\n")
f.write("<th>Example Name</th>\n")
f.write("<th>Elapsed Time</th>\n")
#
# Now iterate through all of the examples
#
for example in et.findall("Example"):
#
# Start a new row for each example
#
f.write("<tr>\n")
#
# Get the result and name of the example in question
#
(result, name, time) = read_test(example)
#
# If the example either failed or crashed, print its result status
# in red; otherwise green. This goes in a <td> ... </td> table data
#
if result == "PASS":
f.write("<td style=\"color:green\">%s</td>\n" % result)
elif result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</fd>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
#
# Write the example name as a new tag data.
#
f.write("<td>%s</td>\n" % name)
#
# Write the elapsed time as a new tag data.
#
f.write("<td>%s</td>\n" % time)
#
# That's it for the current example, so terminate the row.
#
f.write("</tr>\n")
#
# That's it for the table of examples, so terminate the table.
#
f.write("</table>\n")
#
# And that's it for the report, so finish up.
#
f.write("</body>\n")
f.write("</html>\n")
f.close()
#
# Python Control-C handling is broken in the presence of multiple threads.
# Signals get delivered to the runnable/running thread by default and if
# it is blocked, the signal is simply ignored. So we hook sigint and set
# a global variable telling the system to shut down gracefully.
#
thread_exit = False
def sigint_hook(signal, frame):
global thread_exit
thread_exit = True
return 0
#
# In general, the build process itself naturally takes care of figuring out
# which tests are built into the test runner. For example, if waf configure
# determines that ENABLE_EMU is false due to some missing dependency,
# the tests for the emu net device simply will not be built and will
# therefore not be included in the built test runner.
#
# Examples, however, are a different story. In that case, we are just given
# a list of examples that could be run. Instead of just failing, for example,
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
# for relevant configuration items.
#
# XXX This function pokes around in the waf internal state file. To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_config():
for line in open(".lock-waf_" + sys.platform + "_build", "rt"):
if line.startswith("top_dir ="):
key, val = line.split('=')
top_dir = eval(val.strip())
if line.startswith("out_dir ="):
key, val = line.split('=')
out_dir = eval(val.strip())
global NS3_BASEDIR
NS3_BASEDIR = top_dir
global NS3_BUILDDIR
NS3_BUILDDIR = out_dir
for line in open("%s/c4che/_cache.py" % out_dir).readlines():
for item in interesting_config_items:
if line.startswith(item):
exec(line, globals())
if options.verbose:
for item in interesting_config_items:
print "%s ==" % item, eval(item)
#
# It seems pointless to fork a process to run waf to fork a process to run
# the test runner, so we just run the test runner directly. The main thing
# that waf would do for us would be to sort out the shared library path but
# we can deal with that easily and do here.
#
# There can be many different ns-3 repositories on a system, and each has
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
# path -- it is cooked up dynamically, so we do that too.
#
def make_paths():
have_DYLD_LIBRARY_PATH = False
have_LD_LIBRARY_PATH = False
have_PATH = False
have_PYTHONPATH = False
keys = os.environ.keys()
for key in keys:
if key == "DYLD_LIBRARY_PATH":
have_DYLD_LIBRARY_PATH = True
if key == "LD_LIBRARY_PATH":
have_LD_LIBRARY_PATH = True
if key == "PATH":
have_PATH = True
if key == "PYTHONPATH":
have_PYTHONPATH = True
    pypath = os.path.join (NS3_BUILDDIR, "bindings", "python")
if not have_PYTHONPATH:
os.environ["PYTHONPATH"] = pypath
else:
os.environ["PYTHONPATH"] += ":" + pypath
if options.verbose:
print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"]
if sys.platform == "darwin":
if not have_DYLD_LIBRARY_PATH:
os.environ["DYLD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["DYLD_LIBRARY_PATH"] += ":" + path
if options.verbose:
print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"]
elif sys.platform == "win32":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ';' + path
if options.verbose:
print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
elif sys.platform == "cygwin":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ":" + path
if options.verbose:
print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
else:
if not have_LD_LIBRARY_PATH:
os.environ["LD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["LD_LIBRARY_PATH"] += ":" + path
if options.verbose:
print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"]
#
# Short note on generating suppressions:
#
# See the valgrind documentation for a description of suppressions. The easiest
# way to generate a suppression expression is by using the valgrind
# --gen-suppressions option. To do that you have to figure out how to run the
# test in question.
#
# If you do "test.py -v -g -s <suitename> then test.py will output most of what
# you need. For example, if you are getting a valgrind error in the
# devices-mesh-dot11s-regression test suite, you can run:
#
# ./test.py -v -g -s devices-mesh-dot11s-regression
#
# You should see in the verbose output something that looks like:
#
# Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output/2010-01-12-22-47-50-CUT
# --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
#
# You need to pull out the useful pieces, and so could run the following to
# reproduce your error:
#
# valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Hint: Use the first part of the command as is, and point the "tempdir" to
# somewhere real. You don't need to specify an "out" file.
#
# When you run the above command you should see your valgrind error. The
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind. Use something like:
#
# valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Now when valgrind detects an error it will ask:
#
# ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'<ret>.
#
# You will be provided with a suppression expression that looks something like
# the following:
# {
# <insert_a_suppression_name_here>
# Memcheck:Addr8
# fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
# fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
# fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
# ...
# the rest of the stack frame
# ...
# }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case). The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need
# all of that info. For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame. You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
# {
#   Suppress invalid read size errors in SendPreq() when using HwmpProtocolMac
# Memcheck:Addr8
# fun:*HwmpProtocolMac*SendPreq*
# }
#
# Now, when you run valgrind the error will be suppressed.
#
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"
def run_job_synchronously(shell_command, directory, valgrind, is_python, build_path=""):
suppressions_path = os.path.join (NS3_BASEDIR, VALGRIND_SUPPRESSIONS_FILE)
if is_python:
path_cmd = PYTHON[0] + " " + os.path.join (NS3_BASEDIR, shell_command)
else:
if len(build_path):
path_cmd = os.path.join (build_path, shell_command)
else:
path_cmd = os.path.join (NS3_BUILDDIR, shell_command)
if valgrind:
cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path,
path_cmd)
else:
cmd = path_cmd
if options.verbose:
print "Synchronously execute %s" % cmd
start_time = time.time()
proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
elapsed_time = time.time() - start_time
retval = proc.returncode
#
# valgrind sometimes has its own idea about what kind of memory management
# errors are important. We want to detect *any* leaks, so the way to do
# that is to look for the presence of a valgrind leak summary section.
#
# If another error has occurred (like a test suite has failed), we don't
# want to trump that error, so only do the valgrind output scan if the
# test has otherwise passed (return code was zero).
#
if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
retval = 2
if options.verbose:
print "Return code = ", retval
print "stderr = ", stderr_results
return (retval, stdout_results, stderr_results, elapsed_time)
#
# This class defines a unit of testing work. It will typically refer to
# a test suite to run using the test-runner, or an example to run directly.
#
class Job:
def __init__(self):
self.is_break = False
self.is_skip = False
self.is_example = False
self.is_pyexample = False
self.shell_command = ""
self.display_name = ""
self.basedir = ""
self.tempdir = ""
self.cwd = ""
self.tmp_file_name = ""
self.returncode = False
self.elapsed_time = 0
self.build_path = ""
#
# A job is either a standard job or a special job indicating that a worker
    # thread should exit. This special job is indicated by setting is_break
# to true.
#
def set_is_break(self, is_break):
self.is_break = is_break
#
# If a job is to be skipped, we actually run it through the worker threads
# to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
#
def set_is_skip(self, is_skip):
self.is_skip = is_skip
#
# Examples are treated differently than standard test suites. This is
# mostly because they are completely unaware that they are being run as
# tests. So we have to do some special case processing to make them look
# like tests.
#
def set_is_example(self, is_example):
self.is_example = is_example
#
# Examples are treated differently than standard test suites. This is
# mostly because they are completely unaware that they are being run as
# tests. So we have to do some special case processing to make them look
# like tests.
#
def set_is_pyexample(self, is_pyexample):
self.is_pyexample = is_pyexample
#
# This is the shell command that will be executed in the job. For example,
#
# "utils/ns3-dev-test-runner-debug --test-name=some-test-suite"
#
def set_shell_command(self, shell_command):
self.shell_command = shell_command
#
# This is the build path where ns-3 was built. For example,
#
# "/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build/debug"
#
def set_build_path(self, build_path):
self.build_path = build_path
#
    # This is the display name of the job, typically the test suite or example
# name. For example,
#
# "some-test-suite" or "udp-echo"
#
def set_display_name(self, display_name):
self.display_name = display_name
#
# This is the base directory of the repository out of which the tests are
# being run. It will be used deep down in the testing framework to determine
# where the source directory of the test was, and therefore where to find
# provided test vectors. For example,
#
# "/home/user/repos/ns-3-dev"
#
def set_basedir(self, basedir):
self.basedir = basedir
#
# This is the directory to which a running test suite should write any
# temporary files.
#
def set_tempdir(self, tempdir):
self.tempdir = tempdir
#
# This is the current working directory that will be given to an executing
# test as it is being run. It will be used for examples to tell them where
# to write all of the pcap files that we will be carefully ignoring. For
# example,
#
# "/tmp/unchecked-traces"
#
def set_cwd(self, cwd):
self.cwd = cwd
#
# This is the temporary results file name that will be given to an executing
# test as it is being run. We will be running all of our tests in parallel
# so there must be multiple temporary output files. These will be collected
# into a single XML file at the end and then be deleted.
#
def set_tmp_file_name(self, tmp_file_name):
self.tmp_file_name = tmp_file_name
#
# The return code received when the job process is executed.
#
def set_returncode(self, returncode):
self.returncode = returncode
#
# The elapsed real time for the job execution.
#
def set_elapsed_time(self, elapsed_time):
self.elapsed_time = elapsed_time
#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
def __init__(self, input_queue, output_queue):
threading.Thread.__init__(self)
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
job = self.input_queue.get()
#
# Worker threads continue running until explicitly told to stop with
# a special job.
#
if job.is_break:
return
#
# If the global interrupt handler sets the thread_exit variable,
            # we stop doing real work and just report back that a "break" in
            # the normal command processing has happened.
#
if thread_exit == True:
job.set_is_break(True)
self.output_queue.put(job)
continue
#
# If we are actually supposed to skip this job, do so. Note that
# if is_skip is true, returncode is undefined.
#
if job.is_skip:
if options.verbose:
print "Skip %s" % job.shell_command
self.output_queue.put(job)
continue
#
# Otherwise go about the business of running tests as normal.
#
else:
if options.verbose:
print "Launch %s" % job.shell_command
if job.is_example or job.is_pyexample:
#
# If we have an example, the shell command is all we need to
# know. It will be something like "examples/udp/udp-echo" or
# "examples/wireless/mixed-wireless.py"
#
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command,
job.cwd, options.valgrind, job.is_pyexample, job.build_path)
else:
#
# If we're a test suite, we need to provide a little more info
# to the test runner, specifically the base directory and temp
# file name
#
if options.update_data:
update_data = '--update-data'
else:
update_data = ''
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command +
" --xml --tempdir=%s --out=%s %s" % (job.tempdir, job.tmp_file_name, update_data),
job.cwd, options.valgrind, False)
job.set_elapsed_time(et)
if options.verbose:
print "returncode = %d" % job.returncode
print "---------- begin standard out ----------"
print standard_out
print "---------- begin standard err ----------"
print standard_err
print "---------- end standard err ----------"
self.output_queue.put(job)
#
# This is the main function that does the work of interacting with the
# test-runner itself.
#
def run_tests():
#
# Pull some interesting configuration information out of waf, primarily
# so we can know where executables can be found, but also to tell us what
# pieces of the system have been built. This will tell us what examples
# are runnable.
#
read_waf_config()
#
# Set the proper suffix.
#
global BUILD_PROFILE_SUFFIX
if BUILD_PROFILE == 'release':
BUILD_PROFILE_SUFFIX = ""
else:
BUILD_PROFILE_SUFFIX = "-" + BUILD_PROFILE
#
# Add the proper prefix and suffix to the test-runner name to
# match what is done in the wscript file.
#
test_runner_name = "%s%s-%s%s" % (APPNAME, VERSION, "test-runner", BUILD_PROFILE_SUFFIX)
#
# Run waf to make sure that everything is built, configured and ready to go
# unless we are explicitly told not to. We want to be careful about causing
# our users pain while waiting for extraneous stuff to compile and link, so
    # we allow users that know what they're doing to not invoke waf at all.
#
if not options.nowaf:
#
# If the user is running the "kinds" or "list" options, there is an
# implied dependency on the test-runner since we call that program
# if those options are selected. We will exit after processing those
# options, so if we see them, we can safely only build the test-runner.
#
# If the user has constrained us to running only a particular type of
# file, we can only ask waf to build what we know will be necessary.
# For example, if the user only wants to run BVT tests, we only have
# to build the test-runner and can ignore all of the examples.
#
# If the user only wants to run a single example, then we can just build
# that example.
#
# If there is no constraint, then we have to build everything since the
# user wants to run everything.
#
if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
if sys.platform == "win32":
waf_cmd = "waf --target=test-runner"
else:
waf_cmd = "./waf --target=test-runner"
elif len(options.example):
if sys.platform == "win32":
waf_cmd = "waf --target=%s" % os.path.basename(options.example)
else:
waf_cmd = "./waf --target=%s" % os.path.basename(options.example)
else:
if sys.platform == "win32":
waf_cmd = "waf"
else:
waf_cmd = "./waf"
if options.verbose:
print "Building: %s" % waf_cmd
proc = subprocess.Popen(waf_cmd, shell = True)
proc.communicate()
if proc.returncode:
print >> sys.stderr, "Waf died. Not running tests"
return proc.returncode
#
# Dynamically set up paths.
#
make_paths()
#
# Get the information from the build status file.
#
build_status_file = os.path.join (NS3_BUILDDIR, 'build-status.py')
if os.path.exists(build_status_file):
ns3_runnable_programs = get_list_from_file(build_status_file, "ns3_runnable_programs")
ns3_runnable_scripts = get_list_from_file(build_status_file, "ns3_runnable_scripts")
else:
print >> sys.stderr, 'The build status file was not found. You must do waf build before running test.py.'
sys.exit(2)
#
# Make a dictionary that maps the name of a program to its path.
#
ns3_runnable_programs_dictionary = {}
for program in ns3_runnable_programs:
# Remove any directory names from path.
program_name = os.path.basename(program)
ns3_runnable_programs_dictionary[program_name] = program
# Generate the lists of examples to run as smoke tests in order to
# ensure that they remain buildable and runnable over time.
#
example_tests = []
example_names_original = []
python_tests = []
for directory in EXAMPLE_DIRECTORIES:
# Set the directories and paths for this example.
example_directory = os.path.join("examples", directory)
examples_to_run_path = os.path.join(example_directory, "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this example directory's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests)
for module in NS3_ENABLED_MODULES:
# Remove the "ns3-" from the module name.
module = module[len("ns3-"):]
# Set the directories and paths for this example.
module_directory = os.path.join("src", module)
example_directory = os.path.join(module_directory, "examples")
examples_to_run_path = os.path.join(module_directory, "test", "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this module's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests)
#
# If lots of logging is enabled, we can crash Python when it tries to
# save all of the text. We just don't allow logging to be turned on when
# test.py runs. If you want to see logging output from your tests, you
# have to run them using the test-runner directly.
#
os.environ["NS_LOG"] = ""
#
    # There are a couple of options that imply we can exit before starting
# up a bunch of threads and running tests. Let's detect these cases and
# handle them without doing all of the hard work.
#
if options.kinds:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-type-list")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
print standard_out
if options.list:
if len(options.constrain):
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types --test-type=%s" % options.constrain)
else:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
list_items = standard_out.split('\n')
list_items.sort()
print "Test Type Test Name"
print "--------- ---------"
for item in list_items:
if len(item.strip()):
print item
example_names_original.sort()
for item in example_names_original:
print "example ", item
print
if options.kinds or options.list:
return
#
# We communicate results in two ways. First, a simple message relating
# PASS, FAIL, CRASH or SKIP is always written to the standard output. It
# is expected that this will be one of the main use cases. A developer can
# just run test.py with no options and see that all of the tests still
# pass.
#
# The second main use case is when detailed status is requested (with the
    # --text or --html options). Typically this will be text if a developer
# finds a problem, or HTML for nightly builds. In these cases, an
# XML file is written containing the status messages from the test suites.
# This file is then read and translated into text or HTML. It is expected
# that nobody will really be interested in the XML, so we write it somewhere
# with a unique name (time) to avoid collisions. In case an error happens, we
# provide a runtime option to retain the temporary files.
#
# When we run examples as smoke tests, they are going to want to create
# lots and lots of trace files. We aren't really interested in the contents
# of the trace files, so we also just stash them off in the temporary dir.
# The retain option also causes these unchecked trace files to be kept.
#
date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())
if not os.path.exists(TMP_OUTPUT_DIR):
os.makedirs(TMP_OUTPUT_DIR)
    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time)
if not os.path.exists(testpy_output_dir):
os.makedirs(testpy_output_dir)
#
# Create the main output file and start filling it with XML. We need to
# do this since the tests will just append individual results to this file.
#
xml_results_file = os.path.join(testpy_output_dir, "results.xml")
f = open(xml_results_file, 'w')
f.write('<?xml version="1.0"?>\n')
f.write('<Results>\n')
f.close()
#
# We need to figure out what test suites to execute. We are either given one
# suite or example explicitly via the --suite or --example/--pyexample option,
# or we need to call into the test runner and ask it to list all of the available
# test suites. Further, we need to provide the constraint information if it
# has been given to us.
#
# This translates into allowing the following options with respect to the
# suites
#
    #  ./test.py:                                           run all of the suites and examples
# ./test.py --constrain=core: run all of the suites of all kinds
# ./test.py --constrain=unit: run all unit suites
# ./test.py --suite=some-test-suite: run a single suite
# ./test.py --example=examples/udp/udp-echo: run single example
# ./test.py --pyexample=examples/wireless/mixed-wireless.py: run python example
# ./test.py --suite=some-suite --example=some-example: run the single suite
#
# We can also use the --constrain option to provide an ordering of test
# execution quite easily.
#
if len(options.suite):
# See if this is a valid test suite.
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
if options.suite in suites.split('\n'):
suites = options.suite + "\n"
else:
print >> sys.stderr, 'The test suite was not run because an unknown test suite name was requested.'
sys.exit(2)
elif len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain):
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % options.constrain)
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
suites = ""
#
    # suite_list will either be a single test suite name that the user has
# indicated she wants to run or a list of test suites provided by
# the test-runner possibly according to user provided constraints.
# We go through the trouble of setting up the parallel execution
    # even in the case of a single suite to avoid having to process the
# results in two different places.
#
suite_list = suites.split('\n')
#
# Performance tests should only be run when they are requested,
# i.e. they are not run by default in test.py.
#
if options.constrain != 'performance':
# Get a list of all of the performance tests.
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % "performance")
(rc, performance_tests, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
performance_test_list = performance_tests.split('\n')
# Remove any performance tests from the suites list.
for performance_test in performance_test_list:
if performance_test in suite_list:
suite_list.remove(performance_test)
# We now have a possibly large number of test suites to run, so we want to
# run them in parallel. We're going to spin up a number of worker threads
# that will run our test jobs for us.
#
input_queue = Queue.Queue(0)
output_queue = Queue.Queue(0)
jobs = 0
threads=[]
#
# In Python 2.6 you can just use multiprocessing module, but we don't want
# to introduce that dependency yet; so we jump through a few hoops.
#
processors = 1
if sys.platform != "win32":
        if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
processors = os.sysconf('SC_NPROCESSORS_ONLN')
else:
proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
if len(stderr_results) == 0:
processors = int(stdout_results)
#
# Now, spin up one thread per processor which will eventually mean one test
# per processor running concurrently.
#
for i in range(processors):
thread = worker_thread(input_queue, output_queue)
threads.append(thread)
thread.start()
#
# Keep track of some summary statistics
#
total_tests = 0
skipped_tests = 0
#
# We now have worker threads spun up, and a list of work to do. So, run
# through the list of test suites and dispatch a job to run each one.
#
# Dispatching will run with unlimited speed and the worker threads will
# execute as fast as possible from the queue.
#
# Note that we actually dispatch tests to be skipped, so all of the
# PASS, FAIL, CRASH and SKIP processing is done in the same place.
#
for test in suite_list:
test = test.strip()
if len(test):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(False)
job.set_display_name(test)
job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
job.set_cwd(os.getcwd())
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
if (options.multiple):
multiple = ""
else:
multiple = " --stop-on-failure"
if (len(options.fullness)):
fullness = options.fullness.upper()
fullness = " --fullness=%s" % fullness
else:
fullness = " --fullness=QUICK"
path_cmd = os.path.join("utils", test_runner_name + " --test-name=%s%s%s" % (test, multiple, fullness))
job.set_shell_command(path_cmd)
if options.valgrind and test in core_valgrind_skip_tests:
job.set_is_skip(True)
# Skip tests that will fail if NSC is missing.
if not NSC_ENABLED and test in core_nsc_missing_skip_tests:
job.set_is_skip(True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# We've taken care of the discovered or specified test suites. Now we
# have to deal with examples run as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied. For example, if an example depends
# on NSC being configured by waf, that example should have a condition
# that evaluates to true if NSC is enabled. For example,
#
# ("tcp-nsc-zoo", "NSC_ENABLED == True"),
#
# In this case, the example "tcp-nsc-zoo" will only be run if we find the
# waf configuration variable "NSC_ENABLED" to be True.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# XXX As it stands, all of the trace files have unique names, and so file
# collisions can only happen if two instances of an example are running in
# two versions of the test.py process concurrently. We may want to create
# uniquely named temporary traces directories to avoid this problem.
#
# We need to figure out what examples to execute. We are either given one
# suite or example explicitly via the --suite or --example option, or we
# need to walk the list of examples looking for available example
# conditions.
#
# This translates into allowing the following options with respect to the
# suites
#
# ./test.py: run all of the examples
# ./test.py --constrain=unit run no examples
# ./test.py --constrain=example run all of the examples
# ./test.py --suite=some-test-suite: run no examples
# ./test.py --example=some-example: run the single example
# ./test.py --suite=some-suite --example=some-example: run the single example
#
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "example":
if ENABLE_EXAMPLES:
for test, do_run, do_valgrind_run in example_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if ns3_runnable_programs_dictionary.has_key(test_name):
if eval(do_run):
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(test)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path(options.buildpath)
if options.valgrind and not eval(do_valgrind_run):
job.set_is_skip (True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.example):
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
example_name = "%s%s-%s%s" % (APPNAME, VERSION, options.example, BUILD_PROFILE_SUFFIX)
# Don't try to run this example if it isn't runnable.
if not ns3_runnable_programs_dictionary.has_key(example_name):
print "Example %s is not runnable." % example_name
else:
#
# If you tell me to run an example, I will try and run the example
# irrespective of any condition.
#
example_path = ns3_runnable_programs_dictionary[example_name]
example_path = os.path.abspath(example_path)
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(example_path)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(example_path)
job.set_build_path(options.buildpath)
if options.verbose:
print "Queue %s" % example_name
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Run some Python examples as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# We need to figure out what python examples to execute. We are either
# given one pyexample explicitly via the --pyexample option, or we
# need to walk the list of python examples
#
# This translates into allowing the following options with respect to the
# suites
#
# ./test.py --constrain=pyexample run all of the python examples
# ./test.py --pyexample=some-example.py: run the single python example
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "pyexample":
if ENABLE_EXAMPLES:
for test, do_run in python_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if test_name in ns3_runnable_scripts:
if eval(do_run):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(True)
job.set_display_name(test)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path("")
#
# Python programs and valgrind do not work and play
# well together, so we skip them under valgrind.
# We go through the trouble of doing all of this
# work to report the skipped tests in a consistent
                            # way throughout the output formatter.
#
if options.valgrind:
job.set_is_skip (True)
#
# The user can disable python bindings, so we need
# to pay attention to that and give some feedback
# that we're not testing them
#
if not ENABLE_PYTHON_BINDINGS:
job.set_is_skip (True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.pyexample):
# Don't try to run this example if it isn't runnable.
example_name = os.path.basename(options.pyexample)
if example_name not in ns3_runnable_scripts:
print "Example %s is not runnable." % example_name
else:
#
# If you tell me to run a python example, I will try and run the example
# irrespective of any condition.
#
job = Job()
job.set_is_pyexample(True)
job.set_display_name(options.pyexample)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(options.pyexample)
job.set_build_path("")
if options.verbose:
print "Queue %s" % options.pyexample
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Tell the worker threads to pack up and go home for the day. Each one
    # will exit when it sees its is_break task.
#
for i in range(processors):
job = Job()
job.set_is_break(True)
input_queue.put(job)
#
# Now all of the tests have been dispatched, so all we have to do here
# in the main thread is to wait for them to complete. Keyboard interrupt
# handling is broken as mentioned above. We use a signal handler to catch
# sigint and set a global variable. When the worker threads sense this
# they stop doing real work and will just start throwing jobs back at us
# with is_break set to True. In this case, there are no real results so we
# ignore them. If there are real results, we always print PASS or FAIL to
# standard out as a quick indication of what happened.
#
passed_tests = 0
failed_tests = 0
crashed_tests = 0
valgrind_errors = 0
for i in range(jobs):
job = output_queue.get()
if job.is_break:
continue
if job.is_example or job.is_pyexample:
kind = "Example"
else:
kind = "TestSuite"
if job.is_skip:
status = "SKIP"
skipped_tests = skipped_tests + 1
else:
if job.returncode == 0:
status = "PASS"
passed_tests = passed_tests + 1
elif job.returncode == 1:
failed_tests = failed_tests + 1
status = "FAIL"
elif job.returncode == 2:
valgrind_errors = valgrind_errors + 1
status = "VALGR"
else:
crashed_tests = crashed_tests + 1
status = "CRASH"
if options.duration or options.constrain == "performance":
print "%s (%.3f): %s %s" % (status, job.elapsed_time, kind, job.display_name)
else:
print "%s: %s %s" % (status, kind, job.display_name)
if job.is_example or job.is_pyexample:
#
# Examples are the odd man out here. They are written without any
# knowledge that they are going to be run as a test, so we need to
# cook up some kind of output for them. We're writing an xml file,
# so we do some simple XML that says we ran the example.
#
# XXX We could add some timing information to the examples, i.e. run
# them through time and print the results here.
#
f = open(xml_results_file, 'a')
f.write('<Example>\n')
example_name = " <Name>%s</Name>\n" % job.display_name
f.write(example_name)
if status == "PASS":
f.write(' <Result>PASS</Result>\n')
elif status == "FAIL":
f.write(' <Result>FAIL</Result>\n')
elif status == "VALGR":
f.write(' <Result>VALGR</Result>\n')
elif status == "SKIP":
f.write(' <Result>SKIP</Result>\n')
else:
f.write(' <Result>CRASH</Result>\n')
f.write(' <Time real="%.3f"/>\n' % job.elapsed_time)
f.write('</Example>\n')
f.close()
else:
#
# If we're not running an example, we're running a test suite.
# These puppies are running concurrently and generating output
# that was written to a temporary file to avoid collisions.
#
# Now that we are executing sequentially in the main thread, we can
# concatenate the contents of the associated temp file to the main
# results file and remove that temp file.
#
# One thing to consider is that a test suite can crash just as
# well as any other program, so we need to deal with that
# possibility as well. If it ran correctly it will return 0
# if it passed, or 1 if it failed. In this case, we can count
# on the results file it saved being complete. If it crashed, it
# will return some other code, and the file should be considered
# corrupt and useless. If the suite didn't create any XML, then
# we're going to have to do it ourselves.
#
# Another issue is how to deal with a valgrind error. If we run
# a test suite under valgrind and it passes, we will get a return
# code of 0 and there will be a valid xml results file since the code
# ran to completion. If we get a return code of 1 under valgrind,
# the test case failed, but valgrind did not find any problems so the
# test case return code was passed through. We will have a valid xml
# results file here as well since the test suite ran. If we see a
# return code of 2, this means that valgrind found an error (we asked
# it to return 2 if it found a problem in run_job_synchronously) but
# the suite ran to completion so there is a valid xml results file.
# If the suite crashes under valgrind we will see some other error
# return code (like 139). If valgrind finds an illegal instruction or
# some other strange problem, it will die with its own strange return
# code (like 132). However, if the test crashes by itself, not under
# valgrind we will also see some other return code.
#
# If the return code is 0, 1, or 2, we have a valid xml file. If we
# get another return code, we have no xml and we can't really say what
# happened -- maybe the TestSuite crashed, maybe valgrind crashed due
# to an illegal instruction. If we get something beside 0-2, we assume
# a crash and fake up an xml entry. After this is all done, we still
# need to indicate a valgrind error somehow, so we fake up an xml entry
# with a VALGR result. Thus, in the case of a working TestSuite that
# fails valgrind, we'll see the PASS entry for the working TestSuite
# followed by a VALGR failing test suite of the same name.
#
if job.is_skip:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>SKIP</Result>\n')
f.write("</Test>\n")
f.close()
else:
if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
f_to = open(xml_results_file, 'a')
f_from = open(job.tmp_file_name)
f_to.write(f_from.read())
f_to.close()
f_from.close()
else:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
                    f.write('    <Result>CRASH</Result>\n')
f.write("</Test>\n")
f.close()
if job.returncode == 2:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>VALGR</Result>\n')
f.write("</Test>\n")
f.close()
#
# We have all of the tests run and the results written out. One final
# bit of housekeeping is to wait for all of the threads to close down
# so we can exit gracefully.
#
for thread in threads:
thread.join()
#
# Back at the beginning of time, we started the body of an XML document
# since the test suites and examples were going to just write their
# individual pieces. So, we need to finish off and close out the XML
# document
#
f = open(xml_results_file, 'a')
f.write('</Results>\n')
f.close()
#
# Print a quick summary of events
#
print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests,
total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors)
#
# The last things to do are to translate the XML results file to "human
# readable form" if the user asked for it (or make an XML file somewhere)
#
if len(options.html):
translate_to_html(xml_results_file, options.html)
if len(options.text):
translate_to_text(xml_results_file, options.text)
if len(options.xml):
shutil.copyfile(xml_results_file, options.xml)
#
# Let the user know if they need to turn on tests or examples.
#
if not ENABLE_TESTS or not ENABLE_EXAMPLES:
print
if not ENABLE_TESTS:
print '*** Note: ns-3 tests are currently disabled. Enable them by adding'
print '*** "--enable-tests" to ./waf configure or modifying your .ns3rc file.'
print
if not ENABLE_EXAMPLES:
print '*** Note: ns-3 examples are currently disabled. Enable them by adding'
print '*** "--enable-examples" to ./waf configure or modifying your .ns3rc file.'
print
#
# Let the user know if they tried to use valgrind but it was not
# present on their machine.
#
if options.valgrind and not VALGRIND_FOUND:
print
print '*** Note: you are trying to use valgrind, but valgrind could not be found'
print '*** on your machine. All tests and examples will crash or be skipped.'
print
#
# If we have been asked to retain all of the little temporary files, we
    # don't delete them. If we do delete the temporary files, delete only the
# directory we just created. We don't want to happily delete any retained
# directories, which will probably surprise the user.
#
if not options.retain:
shutil.rmtree(testpy_output_dir)
if passed_tests + skipped_tests == total_tests:
return 0 # success
else:
return 1 # catchall for general errors
def main(argv):
parser = optparse.OptionParser()
parser.add_option("-b", "--buildpath", action="store", type="string", dest="buildpath", default="",
metavar="BUILDPATH",
help="specify the path where ns-3 was built (defaults to the build directory for the current variant)")
parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
metavar="KIND",
help="constrain the test-runner by kind of test")
parser.add_option("-d", "--duration", action="store_true", dest="duration", default=False,
help="print the duration of each test suite and example")
parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
metavar="EXAMPLE",
help="specify a single example to run (no relative path is needed)")
parser.add_option("-u", "--update-data", action="store_true", dest="update_data", default=False,
help="If examples use reference data files, get them to re-generate them")
parser.add_option("-f", "--fullness", action="store", type="string", dest="fullness", default="QUICK",
metavar="FULLNESS",
help="choose the duration of tests to run: QUICK, EXTENSIVE, or TAKES_FOREVER, where EXTENSIVE includes QUICK and TAKES_FOREVER includes QUICK and EXTENSIVE (only QUICK tests are run by default)")
parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
help="run the test suites and examples using valgrind")
parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
help="print the kinds of tests available")
parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
help="print the list of known tests")
parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
help="report multiple failures from test suites and test cases")
parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
help="do not run waf before starting testing")
parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
metavar="PYEXAMPLE",
help="specify a single python example to run (with relative path)")
parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
help="retain all temporary files (which are normally deleted)")
parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
metavar="TEST-SUITE",
help="specify a single test suite to run")
parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
metavar="TEXT-FILE",
help="write detailed test results into TEXT-FILE.txt")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="print progress and informational messages")
parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
metavar="HTML-FILE",
help="write detailed test results into HTML-FILE.html")
parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
metavar="XML-FILE",
help="write detailed test results into XML-FILE.xml")
global options
options = parser.parse_args()[0]
signal.signal(signal.SIGINT, sigint_hook)
return run_tests()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
gpl-2.0
| -8,879,720,097,523,576,000
| 39.474161
| 218
| 0.576649
| false
| 3.955535
| true
| false
| false
|
urandu/gumbo-parser
|
python/gumbo/gumboc.py
|
1
|
12165
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""CTypes bindings for the Gumbo HTML5 parser.
This exports the raw interface of the library as a set of very thin ctypes
wrappers. It's intended to be wrapped by other libraries to provide a more
Pythonic API.
"""
__author__ = 'jdtang@google.com (Jonathan Tang)'
import sys
import contextlib
import ctypes
import os.path
import gumboc_tags
_name_of_lib = 'libgumbo.so'
if sys.platform.startswith('darwin'):
_name_of_lib = 'libgumbo.dylib'
elif sys.platform.startswith('win'):
_name_of_lib = "gumbo.dll"
try:
  # First look for a freshly-built .so in the .libs directory, for development.
  _dll = ctypes.cdll.LoadLibrary(os.path.join(
      os.path.dirname(__file__), '..', '..', '.libs', _name_of_lib))
except OSError:
  try:
    # PyPI or setuptools install, look in the current directory.
    _dll = ctypes.cdll.LoadLibrary(os.path.join(
        os.path.dirname(__file__), _name_of_lib))
  except OSError:
    # System library, on unix or mac osx
    _dll = ctypes.cdll.LoadLibrary(_name_of_lib)
# Some aliases for common types.
_bitvector = ctypes.c_uint
_Ptr = ctypes.POINTER
class EnumMetaclass(type(ctypes.c_uint)):
def __new__(metaclass, name, bases, cls_dict):
cls = type(ctypes.c_uint).__new__(metaclass, name, bases, cls_dict)
if name == 'Enum':
return cls
try:
for i, value in enumerate(cls_dict['_values_']):
setattr(cls, value, cls.from_param(i))
except KeyError:
raise ValueError('No _values_ list found inside enum type.')
except TypeError:
raise ValueError('_values_ must be a list of names of enum constants.')
return cls
def with_metaclass(mcls):
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
@with_metaclass(EnumMetaclass)
class Enum(ctypes.c_uint):
@classmethod
def from_param(cls, param):
if isinstance(param, Enum):
if param.__class__ != cls:
raise ValueError("Can't mix enums of different types")
return param
if param < 0 or param > len(cls._values_):
raise ValueError('%d is out of range for enum type %s; max %d.' %
(param, cls.__name__, len(cls._values_)))
return cls(param)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __hash__(self):
return hash(self.value)
def __repr__(self):
try:
return self._values_[self.value]
except IndexError:
raise IndexError('Value %d is out of range for %r' %
(self.value, self._values_))
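# A small usage sketch, added for illustration only ('Color' is a made-up
# enum, not part of the gumbo API): a subclass just lists its constant names
# in _values_ and EnumMetaclass attaches one instance per name.
#
#   class Color(Enum):
#     _values_ = ['RED', 'GREEN', 'BLUE']
#
#   Color.GREEN.value == 1
#   repr(Color.GREEN) == 'GREEN'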
class StringPiece(ctypes.Structure):
_fields_ = [
('data', _Ptr(ctypes.c_char)),
('length', ctypes.c_size_t),
]
def __len__(self):
return self.length
def __str__(self):
return ctypes.string_at(self.data, self.length)
class SourcePosition(ctypes.Structure):
_fields_ = [
('line', ctypes.c_uint),
('column', ctypes.c_uint),
('offset', ctypes.c_uint)
]
SourcePosition.EMPTY = SourcePosition.in_dll(_dll, 'kGumboEmptySourcePosition')
class AttributeNamespace(Enum):
URLS = [
'http://www.w3.org/1999/xhtml',
'http://www.w3.org/1999/xlink',
'http://www.w3.org/XML/1998/namespace',
'http://www.w3.org/2000/xmlns',
]
_values_ = ['NONE', 'XLINK', 'XML', 'XMLNS']
def to_url(self):
return self.URLS[self.value]
class Attribute(ctypes.Structure):
_fields_ = [
('namespace', AttributeNamespace),
('name', ctypes.c_char_p),
('original_name', StringPiece),
('value', ctypes.c_char_p),
('original_value', StringPiece),
('name_start', SourcePosition),
('name_end', SourcePosition),
('value_start', SourcePosition),
('value_end', SourcePosition)
]
class Vector(ctypes.Structure):
_type_ = ctypes.c_void_p
_fields_ = [
('data', _Ptr(ctypes.c_void_p)),
('length', ctypes.c_uint),
('capacity', ctypes.c_uint)
]
class Iter(object):
def __init__(self, vector):
self.current = 0
self.vector = vector
def __iter__(self):
return self
def __next__(self):
# Python 3
if self.current >= self.vector.length:
raise StopIteration
obj = self.vector[self.current]
self.current += 1
return obj
def next(self):
# Python 2
return self.__next__()
def __len__(self):
return self.length
def __getitem__(self, i):
try:
# Python 2
numeric_types = (int, long)
except NameError:
# Python 3
numeric_types = int
if isinstance(i, numeric_types):
if i < 0:
i += self.length
      if i >= self.length:
raise IndexError
array_type = _Ptr(_Ptr(self._type_))
return ctypes.cast(self.data, array_type)[i].contents
return list(self)[i]
def __iter__(self):
return Vector.Iter(self)
Vector.EMPTY = Vector.in_dll(_dll, 'kGumboEmptyVector')
class AttributeVector(Vector):
_type_ = Attribute
class NodeVector(Vector):
# _type_ assigned later, to avoid circular references with Node
pass
class QuirksMode(Enum):
_values_ = ['NO_QUIRKS', 'QUIRKS', 'LIMITED_QUIRKS']
class Document(ctypes.Structure):
_fields_ = [
('children', NodeVector),
('has_doctype', ctypes.c_bool),
('name', ctypes.c_char_p),
('public_identifier', ctypes.c_char_p),
('system_identifier', ctypes.c_char_p),
('doc_type_quirks_mode', QuirksMode),
]
def __repr__(self):
return 'Document'
class Namespace(Enum):
URLS = [
'http://www.w3.org/1999/xhtml',
'http://www.w3.org/2000/svg',
'http://www.w3.org/1998/Math/MathML',
]
_values_ = ['HTML', 'SVG', 'MATHML']
def to_url(self):
return self.URLS[self.value]
class Tag(Enum):
@staticmethod
def from_str(tagname):
text_ptr = ctypes.c_char_p(tagname.encode('utf-8'))
return _tag_enum(text_ptr)
_values_ = gumboc_tags.TagNames + ['UNKNOWN', 'LAST']
class Element(ctypes.Structure):
_fields_ = [
('children', NodeVector),
('tag', Tag),
('tag_namespace', Namespace),
('original_tag', StringPiece),
('original_end_tag', StringPiece),
('start_pos', SourcePosition),
('end_pos', SourcePosition),
('attributes', AttributeVector),
]
@property
def tag_name(self):
original_tag = StringPiece.from_buffer_copy(self.original_tag)
_tag_from_original_text(ctypes.byref(original_tag))
if self.tag_namespace == Namespace.SVG:
svg_tagname = _normalize_svg_tagname(ctypes.byref(original_tag))
if svg_tagname is not None:
return str(svg_tagname)
if self.tag == Tag.UNKNOWN:
if original_tag.data is None:
return ''
return str(original_tag).lower()
return _tagname(self.tag)
def __repr__(self):
return ('<%r>\n' % self.tag +
'\n'.join(repr(child) for child in self.children) +
'</%r>' % self.tag)
class Text(ctypes.Structure):
_fields_ = [
('text', ctypes.c_char_p),
('original_text', StringPiece),
('start_pos', SourcePosition)
]
def __repr__(self):
return 'Text(%r)' % self.text
class NodeType(Enum):
_values_ = ['DOCUMENT', 'ELEMENT', 'TEXT', 'CDATA',
'COMMENT', 'WHITESPACE', 'TEMPLATE']
class NodeUnion(ctypes.Union):
_fields_ = [
('document', Document),
('element', Element),
('text', Text),
]
class Node(ctypes.Structure):
# _fields_ set later to avoid a circular reference
def _contents(self):
# Python3 enters an infinite loop if you use an @property within
# __getattr__, so we factor it out to a helper.
if self.type == NodeType.DOCUMENT:
return self.v.document
elif self.type in (NodeType.ELEMENT, NodeType.TEMPLATE):
return self.v.element
else:
return self.v.text
@property
def contents(self):
return self._contents()
def __getattr__(self, name):
return getattr(self._contents(), name)
def __setattr__(self, name, value):
return setattr(self._contents(), name, value)
def __repr__(self):
return repr(self.contents)
Node._fields_ = [
('type', NodeType),
# Set the type to Node later to avoid a circular dependency.
('parent', _Ptr(Node)),
('next', _Ptr(Node)),
('prev', _Ptr(Node)),
('index_within_parent', ctypes.c_size_t),
# TODO(jdtang): Make a real list of enum constants for this.
('parse_flags', _bitvector),
('v', NodeUnion)
]
NodeVector._type_ = Node
class Options(ctypes.Structure):
_fields_ = [
# TODO(jdtang): Allow the Python API to set the allocator/deallocator
# function. Right now these are treated as opaque void pointers.
('allocator', ctypes.c_void_p),
('deallocator', ctypes.c_void_p),
('userdata', ctypes.c_void_p),
('tab_stop', ctypes.c_int),
('stop_on_first_error', ctypes.c_bool),
('max_errors', ctypes.c_int),
]
class Output(ctypes.Structure):
_fields_ = [
('document', _Ptr(Node)),
('root', _Ptr(Node)),
# TODO(jdtang): Error type.
('errors', Vector),
]
@contextlib.contextmanager
def parse(text, **kwargs):
options = Options()
context_tag = kwargs.get('container', Tag.LAST)
context_namespace = kwargs.get('container_namespace', Namespace.HTML)
for field_name, _ in Options._fields_:
try:
setattr(options, field_name, kwargs[field_name])
except KeyError:
setattr(options, field_name, getattr(_DEFAULT_OPTIONS, field_name))
# We have to manually take a reference to the input text here so that it
# outlives the parse output. If we let ctypes do it automatically on function
# call, it creates a temporary buffer which is destroyed when the call
# completes, and then the original_text pointers point into invalid memory.
text_ptr = ctypes.c_char_p(text.encode('utf-8'))
output = _parse_fragment(
ctypes.byref(options), text_ptr, len(text),
context_tag, context_namespace)
try:
yield output
finally:
_destroy_output(ctypes.byref(options), output)
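# A minimal usage sketch of the context manager above (illustration only; it
# assumes nothing beyond the names defined in this module):
#
#   with parse('<h1>Hello</h1>') as output:
#     root = output.contents.root.contents
#     print(root.tag_name)
#     for child in root.children:
#       print(child.type)
#
# Everything reachable from output, including the original_text pointers, is
# freed by _destroy_output when the with-block exits, so copy out anything
# that must outlive it.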
_DEFAULT_OPTIONS = Options.in_dll(_dll, 'kGumboDefaultOptions')
_parse_with_options = _dll.gumbo_parse_with_options
_parse_with_options.argtypes = [_Ptr(Options), ctypes.c_char_p, ctypes.c_size_t]
_parse_with_options.restype = _Ptr(Output)
_parse_fragment = _dll.gumbo_parse_fragment
_parse_fragment.argtypes = [
_Ptr(Options), ctypes.c_char_p, ctypes.c_size_t, Tag, Namespace]
_parse_fragment.restype = _Ptr(Output)
_tag_from_original_text = _dll.gumbo_tag_from_original_text
_tag_from_original_text.argtypes = [_Ptr(StringPiece)]
_tag_from_original_text.restype = None
_normalize_svg_tagname = _dll.gumbo_normalize_svg_tagname
_normalize_svg_tagname.argtypes = [_Ptr(StringPiece)]
_normalize_svg_tagname.restype = ctypes.c_char_p
_destroy_output = _dll.gumbo_destroy_output
_destroy_output.argtypes = [_Ptr(Options), _Ptr(Output)]
_destroy_output.restype = None
_tagname = _dll.gumbo_normalized_tagname
_tagname.argtypes = [Tag]
_tagname.restype = ctypes.c_char_p
_tag_enum = _dll.gumbo_tag_enum
_tag_enum.argtypes = [ctypes.c_char_p]
_tag_enum.restype = Tag
__all__ = ['StringPiece', 'SourcePosition', 'AttributeNamespace', 'Attribute',
'Vector', 'AttributeVector', 'NodeVector', 'QuirksMode', 'Document',
'Namespace', 'Tag', 'Element', 'Text', 'NodeType', 'Node',
'Options', 'Output', 'parse']
|
apache-2.0
| 2,796,645,549,917,822,500
| 27.159722
| 80
| 0.63469
| false
| 3.426761
| false
| false
| false
|
T3ddy-Bear/PiChat
|
requests/models.py
|
1
|
33804
|
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
import sys
from io import UnsupportedOperation
from ._internal_utils import to_native_string, unicode_is_ascii
from .auth import HTTPBasicAuth
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .hooks import default_hooks
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .status_codes import codes
from .structures import CaseInsensitiveDict
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
or 4-tuples (filename, fileobj, contentype, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
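# Illustrative behaviour of _encode_params above (hedged sketch; list input
# preserves order, dict item order depends on the Python version):
#
#     RequestEncodingMixin._encode_params([('k', 'v1'), ('k', 'v2')])
#     # -> 'k=v1&k=v2'
#     RequestEncodingMixin._encode_params({'k1': 'v1', 'k2': ['v2', 'v3']})
#     # -> 'k1=v1&k2=v2&k2=v3' (item order may vary)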
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
try:
from .packages import idna
except ImportError:
# tolerate the possibility of downstream repackagers unvendoring `requests`
# For more information, read: packages/__init__.py
import idna
sys.modules['requests.packages.idna'] = idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, collections.Mapping))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
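# Hedged sketch of driving PreparedRequest directly, mirroring the
# Request.prepare docstring above (URL and parameter values are illustrative):
#
#     p = PreparedRequest()
#     p.prepare(method='get', url='http://httpbin.org/get', params={'q': 'test'})
#     p.method   # 'GET' (upper-cased by prepare_method)
#     p.url      # 'http://httpbin.org/get?q=test'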
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
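# Hedged usage sketch for the streaming helpers above; requires a response
# obtained with stream=True (URL is illustrative):
#
#     r = requests.get('http://httpbin.org/stream/20', stream=True)
#     for line in r.iter_lines(chunk_size=512):
#         if line:
#             print(line)
#     r.close()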
|
apache-2.0
| 8,118,881,208,333,657,000
| 35.192719
| 119
| 0.582475
| false
| 4.491629
| false
| false
| false
|
aldarionsevero/datalogger-ROS-rasp
|
sensors/botbook_mcp3002.py
|
1
|
2047
|
# botbook_mcp3002.py - read analog values from mcp3002
# (c) BotBook.com - Karvinen, Karvinen, Valtokari
# Installing spidev:
# sudo apt-get update
# sudo apt-get -y install git python-dev
# git clone https://github.com/doceme/py-spidev.git
# cd py-spidev/
# sudo python setup.py install
import spidev # installation help in botbook_mcp3002.py comments
import time
def readAnalog(device=0, channel=0):
assert device in (1, 0)
assert channel in (1, 0)
# open spi
spi = spidev.SpiDev()
spi.open(0, device)
"""
    Protocol: start bit (S), sgl/diff (D), odd/sign (C), MSBF (M)
    Use a leading zero for a more stable clock cycle
    0000 000S DCM0 0000 0000 0000
    Sending 3 8-bit packets, so spi.xfer2 will return the same amount.
    start bit = 1
    sgl/diff = 1 SINGLE ENDED MODE (2 channel mode)
    odd/sign = channel 0/1
    MSBF = 0
"""
command = [1, (2 + channel) << 6, 0]
# 2 + channel shifted 6 to left
# 10 or 11 << 6 = 1000 0000 or 1100 0000
reply = spi.xfer2(command)
"""
Parse right bits from 24 bit package (3*8bit)
We need only data from last 2 bytes.
And there we can discard last two bits to get 10 bit value
as MCP3002 resolution is 10bits
Discard reply[0] byte and start from reply[1] where our data starts
"""
value = reply[1] & 31
# 31 = 0001 1111 with & operation makes sure that we have all data from
# XXXX DDDD and nothing more. 0001 is for signed in next operation.
value = value << 6 # Move to left to make room for next piece of data.
# 000D DDDD << 6 = 0DDD DD00 0000
# Now we get the last of data from reply[2]
value = value + (reply[2] >> 2)
    # Here we discard the last two bits
# DDDD DDXXX >> 2 = 00DD DDDD
# 0DDD DD00 0000 + 00DD DDDD = 0DDD DDDD DDDD
spi.close()
return value
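# Worked example of the bit assembly above (reply values are illustrative): if
# reply == [0x00, 0x02, 0x5C] then
#     reply[1] & 31     -> 0b00010       (= 2)
#     2 << 6            -> 0b10000000    (= 128)
#     reply[2] >> 2     -> 0x5C >> 2 = 0b010111 (= 23)
#     128 + 23          -> 151, the assembled ADC reading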
def main():
# read channel 0 on device 0
value = readAnalog(0, 0)
print value
time.sleep(10)
if __name__ == "__main__":
main()
|
mit
| -913,414,466,716,447,900
| 30.492063
| 75
| 0.622374
| false
| 3.178571
| false
| false
| false
|
maferelo/saleor
|
saleor/graphql/webhook/resolvers.py
|
1
|
1820
|
import graphene
from graphql_jwt.exceptions import PermissionDenied
from ...core.permissions import WebhookPermissions
from ...webhook import models, payloads
from ...webhook.event_types import WebhookEventType
from ..utils import sort_queryset
from .sorters import WebhookSortField
from .types import Webhook, WebhookEvent
def resolve_webhooks(info, sort_by=None, **_kwargs):
service_account = info.context.service_account
if service_account:
qs = models.Webhook.objects.filter(service_account=service_account)
else:
user = info.context.user
if not user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
raise PermissionDenied()
qs = models.Webhook.objects.all()
return sort_queryset(qs, sort_by, WebhookSortField)
def resolve_webhook(info, webhook_id):
service_account = info.context.service_account
if service_account:
_, webhook_id = graphene.Node.from_global_id(webhook_id)
return service_account.webhooks.filter(id=webhook_id).first()
user = info.context.user
if user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
return graphene.Node.get_node_from_global_id(info, webhook_id, Webhook)
raise PermissionDenied()
def resolve_webhook_events():
return [
WebhookEvent(event_type=event_type[0])
for event_type in WebhookEventType.CHOICES
]
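# Hedged illustration: with WebhookEventType.CHOICES containing pairs such as
# ("order_created", "Order created"), the resolver above returns a list like
# [WebhookEvent(event_type="order_created"), ...], one entry per choice.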
def resolve_sample_payload(info, event_name):
service_account = info.context.service_account
required_permission = WebhookEventType.PERMISSIONS.get(event_name)
if service_account and service_account.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
if info.context.user.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
raise PermissionDenied()
|
bsd-3-clause
| 3,332,239,013,513,664,000
| 36.142857
| 79
| 0.734066
| false
| 3.815514
| false
| false
| false
|
mattmillr/utaka
|
src/rest/UtakaBucket.py
|
1
|
7437
|
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from mod_python import apache
import xml.dom.minidom
import utaka.src.core.BucketWithACPAndLog as Bucket
import utaka.src.accessControl.BucketACP as BucketACP
import utaka.src.accessControl.AcpXml as AcpXml
import utaka.src.exceptions.MethodNotAllowedException as MethodNotAllowedException
import utaka.src.exceptions.BadRequestException as BadRequestException
class UtakaBucket:
def __init__(self, utakaReq):
self.utakaReq = utakaReq
def handleRequest(self):
if 'torrent' in self.utakaReq.subresources:
raise BadRequestException.RequestTorrentOfBucketErrorException()
if 'acl' in self.utakaReq.subresources:
if self.utakaReq.req.method == 'GET':
operation = self.__getAclOperation
elif self.utakaReq.req.method == 'PUT':
operation = self.__putAclOperation
else:
raise MethodNotAllowedException.ACLMethodNotAllowedException(self.utakaReq.req.method)
elif 'logging' in self.utakaReq.subresources:
if self.utakaReq.req.method == 'GET':
operation = self.__getLoggingOperation
elif self.utakaReq.req.method == 'PUT':
raise MethodNotAllowedException.BucketLogginStatusMethodException
else:
raise MethodNotAllowedException.LoggingStatusMethodNotAllowedException(self.utakaReq.req.method)
elif self.utakaReq.req.method == 'GET':
operation = self.__getOperation
elif self.utakaReq.req.method == 'PUT':
operation = self.__putOperation
elif self.utakaReq.req.method == 'DELETE':
operation = self.__deleteOperation
elif self.utakaReq.req.method == 'POST':
operation = self.__postOperation
elif self.utakaReq.req.method == 'COPY':
operation = self.__copyOperation
else:
raise MethodNotAllowedException.BucketMethodNotAllowedException(self.utakaReq.req.method)
return operation()
def __copyOperation(self):
pass
def __postOperation(self):
pass
def __deleteOperation(self):
result = Bucket.destroyBucket(bucket=self.utakaReq.bucket, user=self.utakaReq.user)
self.utakaReq.req.status = 204
def __putOperation(self):
cannedACL = self.utakaReq.customHeaderTable.get('acl', 'private')
acp = {}
acp['owner'] = {'userid':self.utakaReq.user}
acl = [{'grantee':{'userid':self.utakaReq.user}, 'permission':'FULL_CONTROL'}]
if cannedACL == 'public-read':
acl.append({'grantee':{'userid':1}, 'permission':'read'})
elif cannedACL == 'public-read-write':
acl.append({'grantee':{'userid':1}, 'permission':'read'})
acl.append({'grantee':{'userid':1}, 'permission':'write'})
elif cannedACL == 'authenticated-read':
acl.append({'grantee':{'userid':2}, 'permission':'read'})
elif cannedACL != 'private':
'''throw error'''
acp['acl'] = acl
result = Bucket.setBucket(bucket = self.utakaReq.bucket, user = self.utakaReq.user, accessControlPolicy = acp)
def __getOperation(self):
getBucketParams = {'name':self.utakaReq.bucket}
for param in 'prefix', 'marker', 'max-keys', 'delimiter':
if param in self.utakaReq.subresources:
getBucketParams[param] = self.utakaReq.subresources[param][0]
if 'max-keys' not in getBucketParams:
getBucketParams['max-keys'] = 1000
res = Bucket.getBucket(bucket = self.utakaReq.bucket, user = self.utakaReq.user,
prefix = getBucketParams.get('prefix'), marker = getBucketParams.get('marker'),
maxKeys = getBucketParams.get('max-keys'), delimiter = getBucketParams.get('delimiter'))
getBucketParams['isTruncated'] = str(res[2])
self.utakaReq.req.content_type = 'application/xml'
self.utakaReq.write(self.__getXMLResponse(getBucketParams, res[0], res[1]))
def __putLoggingOperation(self):
pass
def __getLoggingOperation(self):
Bucket.getBucketLogStatus(user=self.utakaReq.user, bucket=self.utakaReq.bucket)
def __putAclOperation(self):
#READ BODY
acp = AcpXml.fromXML(self.utakaReq.req.read())
Bucket.setBucketACP(user=self.utakaReq.user, bucket=self.utakaReq.bucket, accessControlPolicy=acp)
pass
def __getAclOperation(self):
bucket_acp = Bucket.getBucketACP(bucket=self.utakaReq.bucket, user=self.utakaReq.user)
if len(bucket_acp) == 0:
'''bucket not found, throw error'''
else:
			self.utakaReq.req.content_type = 'application/xml'
self.utakaReq.write(AcpXml.toXML(bucket_acp))
def __getXMLResponse(self, bucketDictionary, contentDictionaryList, commonPrefixesList):
doc = xml.dom.minidom.Document()
listBucketEl = doc.createElement("ListBucketResult")
listBucketEl.setAttribute('xmlns', 'http://s3.amazonaws.com/doc/2006-03-01/')
nameEl = doc.createElement("Name")
nameEl.appendChild(doc.createTextNode(bucketDictionary.get('name')))
listBucketEl.appendChild(nameEl)
prefixEl = doc.createElement("Prefix")
prefixEl.appendChild(doc.createTextNode(bucketDictionary.get('prefix', '')))
listBucketEl.appendChild(prefixEl)
markerEl = doc.createElement("Marker")
markerEl.appendChild(doc.createTextNode(bucketDictionary.get('marker', '')))
listBucketEl.appendChild(markerEl)
maxkeysEl = doc.createElement("MaxKeys")
maxkeysEl.appendChild(doc.createTextNode(str(bucketDictionary.get('max-keys', ''))))
listBucketEl.appendChild(maxkeysEl)
truncatedEl= doc.createElement("IsTruncated")
truncatedEl.appendChild(doc.createTextNode(bucketDictionary.get('isTruncated', '')))
listBucketEl.appendChild(truncatedEl)
for val in contentDictionaryList:
contentsEl = doc.createElement("Contents")
keyEl = doc.createElement("Key")
keyEl.appendChild(doc.createTextNode(val['key']))
contentsEl.appendChild(keyEl)
lastModifiedEl = doc.createElement("LastModified")
lastModifiedEl.appendChild(doc.createTextNode(val['lastModified']))
contentsEl.appendChild(lastModifiedEl)
eTagEl = doc.createElement("ETag")
eTagEl.appendChild(doc.createTextNode(val['eTag']))
contentsEl.appendChild(eTagEl)
sizeEl = doc.createElement("Size")
sizeEl.appendChild(doc.createTextNode(str(val['size'])))
contentsEl.appendChild(sizeEl)
storageClassEl = doc.createElement("StorageClass")
storageClassEl.appendChild(doc.createTextNode("STANDARD"))
contentsEl.appendChild(storageClassEl)
ownerEl = doc.createElement("Owner")
ownerIdEl = doc.createElement("ID")
ownerIdEl.appendChild(doc.createTextNode(str(val['owner']['id'])))
ownerNameEl = doc.createElement("DisplayName")
ownerNameEl.appendChild(doc.createTextNode(val['owner']['name']))
ownerEl.appendChild(ownerIdEl)
ownerEl.appendChild(ownerNameEl)
contentsEl.appendChild(ownerEl)
listBucketEl.appendChild(contentsEl)
if commonPrefixesList:
commonPrefixesEl = doc.createElement("CommonPrefixes")
for val in commonPrefixesList:
commonPrefixEl = doc.createElement("Prefix")
commonPrefixEl.appendChild(doc.createTextNode(val))
commonPrefixesEl.appendChild(commonPrefixEl)
listBucketEl.appendChild(commonPrefixesEl)
doc.appendChild(listBucketEl)
return doc.toxml('utf-8')
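# Hedged sketch of the document built above for a bucket with one key (values
# are illustrative, whitespace added for readability):
#
#   <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
#     <Name>mybucket</Name><Prefix></Prefix><Marker></Marker>
#     <MaxKeys>1000</MaxKeys><IsTruncated>False</IsTruncated>
#     <Contents><Key>photo.jpg</Key><LastModified>...</LastModified>...</Contents>
#   </ListBucketResult>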
|
apache-2.0
| -5,196,733,002,555,355,000
| 36.756345
| 112
| 0.751513
| false
| 3.369733
| false
| false
| false
|
krisb78/django-redactorjs
|
testproject/settings.py
|
1
|
5391
|
import os
# Django settings for testproject project.
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'redactor_testproject', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(
PROJECT_PATH,
'media'
)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(
PROJECT_PATH,
'static'
)
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
(
'redactor-js',
os.path.join(
PROJECT_PATH,
'..',
'redactor-js'
)
),
)
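# With the prefixed entry above, files from ../redactor-js are collected under a
# 'redactor-js' subdirectory, so e.g. redactor.js would be served at
# STATIC_URL + 'redactor-js/redactor.js' (the filename here is illustrative).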
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '&6dx22n2y&2k^i+7j3*d+y-yf(hv6e8qi^8gq#5-jo(h7im@7u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'app',
'redactorjs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
bsd-3-clause
| -4,929,814,843,324,972,000
| 31.672727
| 109
| 0.674643
| false
| 3.687415
| false
| false
| false
|
auduny/chains
|
lib/chains/services/sonos/__init__.py
|
1
|
7848
|
from threading import Timer
import chains.service
from chains.common import log
import time, datetime, re, copy
from Queue import Empty
from soco import SoCo, discover
from soco.events import event_listener
class SonosService(chains.service.Service):
def onInit(self):
# Discover Sonos players
self.zones = list(discover())
# Set default player
self.defaultZone = None
if self.zones:
defaultZone = self.config.get('defaultzone')
if defaultZone:
self.defaultZone = self.getZone(defaultZone)
else:
self.defaultZone = self.zones[0]
self.interval = None
self.subscribers = []
self.registerForEvents()
def onStart(self):
while True:
self.checkForNewEvents()
def getZone(self, nameOrId):
for zone in self.zones:
if zone.uid == nameOrId:
return zone
if zone.player_name == nameOrId:
return zone
return self.defaultZone
def action_play(self, zone=None):
zone = self.getZone(zone)
zone.play()
def action_stop(self, zone=None):
zone = self.getZone(zone)
zone.stop()
def action_playUri(self, uri, zone=None, volume=None, playmode=None):
zone = self.getZone(zone)
if volume:
log.info('vol:%s'%volume)
zone.volume = int(volume)
if playmode:
log.info('mode:%s'%playmode)
zone.play_mode = playmode
log.info('zone: %s' % zone)
log.info('playUri: %s' % uri)
zone.play_uri(uri)
def action_setPlayMode(self, mode, zone=None):
zone = self.getZone(zone)
zone.play_mode = mode
    def action_getPlaylistNames(self, zone=None):
        zone = self.getZone(zone)
        playlists = zone.get_music_library_information('sonos_playlists')
result = []
for playlist in playlists:
result.append( playlist.title )
return result
    def action_getPlaylistDicts(self, zone=None):
        zone = self.getZone(zone)
        playlists = zone.get_music_library_information('sonos_playlists')
result = []
for playlist in playlists:
result.append( playlist.to_dict )
return result
    def action_getPlaylistTracks(self, playlist_name, zone=None):
        zone = self.getZone(zone)
        playlists = zone.get_music_library_information('sonos_playlists')
result = []
for playlist in playlists:
if playlist.title == playlist_name:
track_list = zone.browse(playlist)
for item in track_list:
result.append({
                        'title': item.title,
                        'album': item.album,
                        'artist': item.creator,
                        'uri': item.uri,
                        'art': item.album_art_uri
})
return result
return result
def action_playPlaylist(self, name, zone=None, volume=None, playmode=None):
zone = self.getZone(zone)
if volume:
log.info('vol:%s'%volume)
zone.volume = int(volume)
if playmode:
log.info('mode:%s'%playmode)
zone.play_mode = playmode
log.info('pls')
playlists = zone.get_sonos_playlists()
if not playlists:
return False
found = None
for playlist in playlists:
if playlist.title.lower().strip() == name.lower().strip():
found = playlist
break
if not found:
return False
zone.clear_queue()
zone.add_to_queue(playlist)
zone.play_from_queue(0)
def action_setVolume(self, volume, zone=None):
zone = self.getZone(zone)
zone.volume = int(volume)
def action_getVolume(self, zone=None):
zone = self.getZone(zone)
return zone.volume
def action_modifyVolume(self,amount, zone=None):
zone = self.getZone(zone)
amount = int(amount)
zone.volume += amount
def action_volumeUp(self, zone=None):
zone = self.getZone(zone)
zone.volume += 1
def action_volumeDown(self, zone=None):
zone = self.getZone(zone)
zone.volume -= 1
def action_getTrackInfo(self, zone=None):
zone = self.getZone(zone)
info = zone.get_current_track_info()
del info['metadata']
return info
def action_getTrackMetaData(self, zone=None):
zone = self.getZone(zone)
info = zone.get_current_track_info()
return info['metadata']
def action_getTrackUri(self, zone=None):
zone = self.getZone(zone)
info = zone.get_current_track_info()
if not info:
return None
return info.get('uri')
def action_clearQueue(self, zone=None):
zone = self.getZone(zone)
zone.clear_queue()
def action_join(self, slaveZone, masterZone=None):
slaveZone = self.getZone(slaveZone)
masterZone = self.getZone(masterZone)
slaveZone.join(masterZone)
def action_unjoin(self, slaveZone):
slaveZone = self.getZone(slaveZone)
slaveZone.unjoin()
def action_list(self):
result = []
for zone in self.zones:
#result.append({
# 'name': zone.player_name,
# 'id': zone.uid
#})
result.append(zone.get_speaker_info())
return result
def sendEventWrapper(self, property, zone, event):
name = zone.player_name
self.sendEvent(property, event, {
'device': name,
'type': 'speaker',
'location': name
})
def registerForEvent(self, zone=None):
        if zone is None:
            return
controlSubscriber = zone.renderingControl.subscribe()
soundSubscriber = zone.avTransport.subscribe()
self.subscribers.append({
'zone': zone,
'control': controlSubscriber,
'sound': soundSubscriber
})
def registerForEvents(self):
for zone in self.zones:
self.registerForEvent(zone)
def parseEvents(self, zone):
for subscriber in [zone['control'], zone['sound']]:
try:
event = subscriber.events.get(timeout=0.5)
if 'transport_state' in event.variables:
self.sendEventWrapper('state', zone['zone'], { 'transport': {
'value': event.variables['transport_state'],
'actions': ['play', 'stop']
}})
if 'volume' in event.variables and 'Master' in event.variables['volume']:
volume = int(event.variables['volume']['Master'])
self.sendEventWrapper('volume', zone['zone'], { 'volume': {
'value': volume,
'actions': ['volumeUp', 'volumeDown']
}})
if 'mute' in event.variables and 'Master' in event.variables['mute']:
self.sendEventWrapper('mute', zone['zone'], { 'mute': {
'value': int(event.variables['mute']['Master'])
}})
except Empty:
pass
def checkForNewEvents(self):
for zone in self.subscribers:
self.parseEvents(zone)
def deRegisterForEvent(self, zone):
zone['control'].unsubscribe()
zone['sound'].unsubscribe()
def deRegisterForEvents(self):
for zone in self.subscribers:
self.deRegisterForEvent(zone)
def onShutdown(self):
if self.interval:
self.interval.cancel()
self.deRegisterForEvents()
event_listener.stop()
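# Hedged sketch of the underlying soco calls used by this service, runnable
# outside the chains framework (assumes at least one zone is discovered):
#
#     from soco import discover
#     zone = next(iter(discover()))
#     zone.volume = 25
#     zone.play_uri('http://example.com/stream.mp3')  # URL is illustrative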
|
gpl-2.0
| -5,247,336,795,785,983,000
| 29.418605
| 89
| 0.55237
| false
| 4.190069
| false
| false
| false
|
mtougeron/python-openstacksdk
|
examples/network/delete.py
|
1
|
1721
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network examples
Destroy all the pieces of a working network.
To run:
python examples/network/delete.py
"""
import sys
from examples import common
from examples import connection
def delete(conn, name):
router = conn.network.find_router(name)
if router is not None:
print(str(router))
subnet = conn.network.find_subnet(name)
if subnet is not None:
print(str(subnet))
if router:
try:
conn.network.router_remove_interface(router, subnet.id)
except Exception:
pass
for port in conn.network.get_subnet_ports(subnet.id):
print(str(port))
conn.delete(port)
if router is not None:
conn.delete(router)
if subnet:
conn.delete(subnet)
network = conn.network.find_network(name)
if network is not None:
print(str(network))
conn.delete(network)
def run_network(opts):
name = opts.data.pop('name', 'netty')
conn = connection.make_connection(opts)
return(delete(conn, name))
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_network))
|
apache-2.0
| -5,908,766,536,051,304,000
| 25.075758
| 75
| 0.662987
| false
| 3.929224
| false
| false
| false
|
zaubermaerchen/imas_cg_api
|
api/skill/serializer.py
|
1
|
1189
|
# coding: utf-8
from rest_framework import serializers
from data.models import Skill, SkillValue
class ListSerializer(serializers.ModelSerializer):
skill_value_list = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Skill
fields = [
'skill_id',
'target_unit',
'target_member',
'target_type',
'target_num',
'target_param',
'skill_value_id',
'skill_value_list',
'comment'
]
@staticmethod
def get_skill_value_list(obj):
return SkillValue.get_value_list(obj.skill_value_id)
class Costar(object):
def __init__(self, name, count):
self.name = name
self.count = count
class CostarSerializer(serializers.Serializer):
name = serializers.CharField(max_length=255)
count = serializers.IntegerField()
def create(self, validated_data):
return Costar(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.count = validated_data.get('count', instance.count)
return instance
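# Hedged usage sketch for CostarSerializer (standard DRF flow; values are
# illustrative):
#
#     serializer = CostarSerializer(data={'name': 'example', 'count': 3})
#     serializer.is_valid()          # True
#     costar = serializer.save()     # -> Costar(name='example', count=3)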
|
mit
| -7,853,903,863,662,876,000
| 26.022727
| 72
| 0.619008
| false
| 4.05802
| false
| false
| false
|
vup1120/oq-risklib
|
openquake/commonlib/writers.py
|
1
|
9803
|
# Copyright (c) 2010-2014, GEM Foundation.
#
# NRML is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NRML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with NRML. If not, see <http://www.gnu.org/licenses/>.
import cStringIO
from contextlib import contextmanager
from xml.sax.saxutils import escape, quoteattr
import numpy # this is needed by the doctests, don't remove it
@contextmanager
def floatformat(fmt_string):
"""
Context manager to change the default format string for the
function :func:`openquake.commonlib.writers.scientificformat`.
:param fmt_string: the format to use; for instance '%13.9E'
"""
fmt_defaults = scientificformat.__defaults__
scientificformat.__defaults__ = (fmt_string,) + fmt_defaults[1:]
try:
yield
finally:
scientificformat.__defaults__ = fmt_defaults
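# Hedged illustration of the context manager above:
#
#     with floatformat('%.2E'):
#         scientificformat(0.004)   # -> '4.00E-03'
#     scientificformat(0.004)       # default format restored: '4.000000000E-03'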
zeroset = set(['E', '-', '+', '.', '0'])
def scientificformat(value, fmt='%13.9E', sep=' ', sep2=':'):
"""
:param value: the value to convert into a string
:param fmt: the formatting string to use for float values
:param sep: separator to use for vector-like values
:param sep2: second separator to use for matrix-like values
Convert a float or an array into a string by using the scientific notation
and a fixed precision (by default 10 decimal digits). For instance:
>>> scientificformat(-0E0)
'0.000000000E+00'
>>> scientificformat(-0.004)
'-4.000000000E-03'
>>> scientificformat([0.004])
'4.000000000E-03'
>>> scientificformat([0.01, 0.02], '%10.6E')
'1.000000E-02 2.000000E-02'
>>> scientificformat([[0.1, 0.2], [0.3, 0.4]], '%4.1E')
'1.0E-01:2.0E-01 3.0E-01:4.0E-01'
"""
if isinstance(value, basestring):
return value
elif isinstance(value, (int, long)):
return str(value)
elif hasattr(value, '__len__'):
return sep.join((scientificformat(f, fmt, sep2) for f in value))
elif isinstance(value, float):
fmt_value = fmt % value
if set(fmt_value) <= zeroset:
            # '-0.0000000E+00' is converted into '0.0000000E+00'
fmt_value = fmt_value.replace('-', '')
return fmt_value
return str(value)
class StreamingXMLWriter(object):
"""
A stream-based XML writer. The typical usage is something like this::
with StreamingXMLWriter(output_file) as writer:
writer.start_tag('root')
for node in nodegenerator():
writer.serialize(node)
writer.end_tag('root')
"""
def __init__(self, stream, indent=4, encoding='utf-8', nsmap=None):
"""
:param stream: the stream or a file where to write the XML
:param int indent: the indentation to use in the XML (default 4 spaces)
"""
self.stream = stream
self.indent = indent
self.encoding = encoding
self.indentlevel = 0
self.nsmap = nsmap
def shorten(self, tag):
"""
Get the short representation of a fully qualified tag
:param str tag: a (fully qualified or not) XML tag
"""
if tag.startswith('{'):
ns, _tag = tag.rsplit('}')
tag = self.nsmap.get(ns[1:], '') + _tag
return tag
def _write(self, text):
"""Write text by respecting the current indentlevel"""
if not isinstance(text, str):
text = text.encode(self.encoding, 'xmlcharrefreplace')
spaces = ' ' * (self.indent * self.indentlevel)
self.stream.write(spaces + text.strip() + '\n')
def emptyElement(self, name, attrs):
"""Add an empty element (may have attributes)"""
attr = ' '.join('%s=%s' % (n, quoteattr(scientificformat(v)))
for n, v in sorted(attrs.iteritems()))
self._write('<%s %s/>' % (name, attr))
def start_tag(self, name, attrs=None):
"""Open an XML tag"""
if not attrs:
self._write('<%s>' % name)
else:
self._write('<' + name)
for (name, value) in sorted(attrs.items()):
self._write(
' %s=%s' % (name, quoteattr(scientificformat(value))))
self._write('>')
self.indentlevel += 1
def end_tag(self, name):
"""Close an XML tag"""
self.indentlevel -= 1
self._write('</%s>' % name)
def serialize(self, node):
"""Serialize a node object (typically an ElementTree object)"""
if self.nsmap is not None:
tag = self.shorten(node.tag)
else:
tag = node.tag
if not node and node.text is None:
self.emptyElement(tag, node.attrib)
return
self.start_tag(tag, node.attrib)
if node.text is not None:
self._write(escape(scientificformat(node.text).strip()))
for subnode in node:
self.serialize(subnode)
self.end_tag(tag)
def __enter__(self):
"""Write the XML declaration"""
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
return self
def __exit__(self, etype, exc, tb):
"""Close the XML document"""
pass
def tostring(node, indent=4):
"""
Convert a node into an XML string by using the StreamingXMLWriter.
This is useful for testing purposes.
:param node: a node object (typically an ElementTree object)
:param indent: the indentation to use in the XML (default 4 spaces)
"""
out = cStringIO.StringIO()
writer = StreamingXMLWriter(out, indent)
writer.serialize(node)
return out.getvalue()
def save_csv(dest, header_rows, sep=',', fmt='%12.8E', mode='wb'):
"""
:param dest: destination filename
:param header_rows: header + rows to save
:param sep: separator to use (default comma)
:param fmt: formatting string (default '%12.8E')
:param mode: file open mode (default 'wb')
"""
with open(dest, mode) as f:
for row in header_rows:
f.write(sep.join(scientificformat(col, fmt) for col in row) + '\n')
return dest
# recursive function used internally by build_header
def _build_header(dtype, root):
header = []
if dtype.fields is None:
if not root:
return []
return [root + (str(dtype), dtype.shape)]
for field in dtype.fields:
dt = dtype.fields[field][0]
if dt.subdtype is None: # nested
header.extend(_build_header(dt, root + (field,)))
else:
numpytype = str(dt.subdtype[0])
header.append(root + (field, numpytype, dt.shape))
return header
def build_header(dtype):
"""
Convert a numpy nested dtype into a list of strings suitable as header
of csv file.
>>> imt_dt = numpy.dtype([('PGA', float, 3), ('PGV', float, 4)])
>>> build_header(imt_dt)
['PGV:float64:4', 'PGA:float64:3']
>>> gmf_dt = numpy.dtype([('A', imt_dt), ('B', imt_dt),
... ('idx', numpy.uint32)])
>>> build_header(gmf_dt)
['A-PGV:float64:4', 'A-PGA:float64:3', 'B-PGV:float64:4', 'B-PGA:float64:3', 'idx:uint32:']
"""
header = _build_header(dtype, ())
h = []
for col in header:
name = '-'.join(col[:-2])
numpytype = col[-2]
shape = col[-1]
h.append(':'.join([name, numpytype, ':'.join(map(str, shape))]))
return h
def extract_from(data, fields):
"""
Extract data from numpy arrays with nested records.
>>> imt_dt = numpy.dtype([('PGA', float, 3), ('PGV', float, 4)])
>>> a = numpy.array([([1, 2, 3], [4, 5, 6, 7])], imt_dt)
>>> extract_from(a, ['PGA'])
array([[ 1., 2., 3.]])
>>> gmf_dt = numpy.dtype([('A', imt_dt), ('B', imt_dt),
... ('idx', numpy.uint32)])
>>> b = numpy.array([(([1, 2, 3], [4, 5, 6, 7]),
... ([1, 2, 4], [3, 5, 6, 7]), 8)], gmf_dt)
>>> extract_from(b, ['idx'])
array([8], dtype=uint32)
>>> extract_from(b, ['B', 'PGV'])
array([[ 3., 5., 6., 7.]])
"""
for f in fields:
data = data[f]
return data
def write_csv(dest, data, sep=',', fmt='%12.8E', header=None):
"""
:param dest: destination filename
:param data: array to save
:param sep: separator to use (default comma)
:param fmt: formatting string (default '%12.8E')
:param header:
optional list with the names of the columns to display
"""
try:
# see if data is a composite numpy array
data.dtype.fields
except AttributeError:
# not a composite array
header = header or []
else:
header = header or build_header(data.dtype)
with open(dest, 'wb') as f:
if header:
f.write(sep.join(header) + '\n')
all_fields = [col.split(':', 1)[0].split('-')
for col in header]
for record in data:
row = []
for fields in all_fields:
row.append(extract_from(record, fields))
f.write(sep.join(scientificformat(col, fmt)
for col in row) + '\n')
else:
for row in data:
f.write(sep.join(scientificformat(col, fmt)
for col in row) + '\n')
return dest
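# --- Editor's illustrative sketch (added, not part of the original module) ---
# Shows how build_header and write_csv cooperate on a nested dtype; the helper
# name and the output file name 'gmf_demo.csv' are arbitrary assumptions.
def _demo_write_csv():
    imt_dt = numpy.dtype([('PGA', float, 3), ('PGV', float, 4)])
    data = numpy.array([([1, 2, 3], [4, 5, 6, 7])], imt_dt)
    # the CSV header is derived from the dtype, as in the build_header doctest above
    return write_csv('gmf_demo.csv', data)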
|
agpl-3.0
| 344,428,272,819,919,700
| 32.803448
| 95
| 0.571254
| false
| 3.704837
| false
| false
| false
|
tranlyvu/autonomous-vehicle-projects
|
Vehicle Detection/src/vehicle_detection.py
|
1
|
14396
|
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
from skimage.feature import hog
from skimage import color, exposure
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
import time
from moviepy.editor import VideoFileClip
from scipy.ndimage.measurements import label
from IPython.display import HTML
def load_data(my_list):
new_list = []
for image in my_list:
img = cv2.imread(image)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
new_list.append(img)
return new_list
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
if vis == True:
features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
visualise=True, feature_vector=False)
return features, hog_image
else:
features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
visualise=False, feature_vector=feature_vec)
return features
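# --- Editor's illustrative sketch (added, not part of the original file) ---
# Demonstrates the two return modes of get_hog_features on a synthetic 64x64
# grayscale patch; the helper name and the zero patch are made up.
def _demo_hog_features():
    patch = np.zeros((64, 64), dtype=np.float32)
    vector = get_hog_features(patch, orient=9, pix_per_cell=8,
                              cell_per_block=2, vis=False, feature_vec=True)
    vector_blocks, hog_image = get_hog_features(patch, orient=9, pix_per_cell=8,
                                                cell_per_block=2, vis=True)
    return vector, hog_image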
def bin_spatial(img, size=(32, 32)):
color1 = cv2.resize(img[:,:,0], size).ravel()
color2 = cv2.resize(img[:,:,1], size).ravel()
color3 = cv2.resize(img[:,:,2], size).ravel()
return np.hstack((color1, color2, color3))
def color_hist(img, nbins=32): #bins_range=(0, 256)
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins)
channel2_hist = np.histogram(img[:,:,1], bins=nbins)
channel3_hist = np.histogram(img[:,:,2], bins=nbins)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs,
color_space='RGB',
spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8,
cell_per_block=2,
hog_channel=0,
spatial_feat=True,
hist_feat=True,
hog_feat=True):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for image in imgs:
file_features = []
# Read in each one by one
#image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
file_features.append(spatial_features)
if hist_feat == True:
# Apply color_hist()
hist_features = color_hist(feature_image, nbins=hist_bins)
file_features.append(hist_features)
if hog_feat == True:
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
file_features.append(hog_features)
features.append(np.concatenate(file_features))
# Return list of feature vectors
return features
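# --- Editor's illustrative sketch (added, not part of the original file) ---
# Runs the combined spatial / colour-histogram / HOG pipeline on one synthetic
# RGB image; the helper name and the zero image are made up.
def _demo_extract_features():
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    feats = extract_features([img], color_space='YCrCb', hog_channel='ALL')
    return feats[0]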
def add_heat(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
return heatmap
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
# Return the image
return img
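# --- Editor's illustrative sketch (added, not part of the original file) ---
# Chains add_heat, apply_threshold, scipy's label and draw_labeled_bboxes;
# hot_windows and the threshold of 1 are made-up example inputs.
def _demo_heatmap_pipeline(img, hot_windows, threshold=1):
    heat = np.zeros_like(img[:, :, 0]).astype(float)
    heat = add_heat(heat, hot_windows)
    heat = apply_threshold(heat, threshold)
    labels = label(heat)
    return draw_labeled_bboxes(np.copy(img), labels)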
img_boxes = []
def convert_color(img, conv='RGB2YCrCb'):
if conv == 'RGB2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
if conv == 'BGR2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
if conv == 'RGB2LUV':
return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
draw_img = np.copy(img)
#img = img.astype(np.float32)/255
heat_map = np.zeros_like(img[:,:,0]).astype(np.float)
img_tosearch = img[ystart:ystop,:,:]
ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
nfeat_per_block = orient*cell_per_block**2
    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
# Compute individual channel HOG features for the entire image
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
#test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
test_prediction = svc.predict(test_features)
if test_prediction == 1:
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window*scale)
cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6)
img_boxes.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw, ytop_draw+win_draw+ystart)))
heat_map[ytop_draw+ystart:ytop_draw+win_draw+ystart, xbox_left:xbox_left+win_draw] +=1
return draw_img, heat_map
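# --- Editor's worked example (added note, not part of the original file) ---
# With a 1280-pixel-wide search strip, pix_per_cell=8 and cell_per_block=2:
#   nxblocks = 1280 // 8 - 2 + 1 = 159 blocks across the strip,
#   nblocks_per_window = 64 // 8 - 2 + 1 = 7 blocks per 64-pixel window,
#   nxsteps = (159 - 7) // 2 + 1 = 77 horizontal window positions to classify.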
def process_image(img):
# Find final boxes from heatmap using label function
out_img, heatmap = find_cars(img,
ystart=YSTART,
ystop=YSTOP,
scale=SCALE,
svc = SVC,
X_scaler = X_scaler,
orient= ORIENTATION,
pix_per_cell = PIX_PER_CELL,
cell_per_block= CELL_PER_BLOCK,
spatial_size = SPATIAL_SIZE,
hist_bins = HIST_BINS)
labels = label(heatmap)
draw_img = draw_labeled_bboxes(np.copy(img), labels)
return draw_img
if __name__ == "__main__":
vehicles_images = glob.glob('../../../vehicles/vehicles/*/*.png')
non_vehicles_images = glob.glob('../../../non-vehicles/non-vehicles/*/*.png')
cars = load_data(vehicles_images)
non_cars = load_data(non_vehicles_images)
"""Parameters"""
COLOR_SPACE = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
ORIENTATION = 9 # HOG orientations
PIX_PER_CELL = 8 # HOG pixels per cell
CELL_PER_BLOCK = 2 # HOG cells per block
HOG_CHANNEL = "ALL" # Can be 0, 1, 2, or "ALL"
SPATIAL_SIZE = (16, 16) # Spatial binning dimensions
HIST_BINS = 16 # Number of histogram bins
IS_SPATIAL_FEAT = True # Spatial features on or off
IS_HIST_FEAT = True # Histogram features on or off
IS_HOG_FEAT = True # HOG features on or off
t=time.time()
car_features = extract_features(cars,
color_space = COLOR_SPACE,
spatial_size= SPATIAL_SIZE,
hist_bins = HIST_BINS,
orient = ORIENTATION,
pix_per_cell = PIX_PER_CELL,
cell_per_block = CELL_PER_BLOCK,
hog_channel = HOG_CHANNEL,
spatial_feat = IS_SPATIAL_FEAT ,
hist_feat = IS_HIST_FEAT,
hog_feat = IS_HOG_FEAT)
notcar_features = extract_features(non_cars,
color_space = COLOR_SPACE,
spatial_size= SPATIAL_SIZE,
hist_bins = HIST_BINS,
orient = ORIENTATION,
pix_per_cell = PIX_PER_CELL,
cell_per_block = CELL_PER_BLOCK,
hog_channel = HOG_CHANNEL,
spatial_feat = IS_SPATIAL_FEAT ,
hist_feat = IS_HIST_FEAT,
hog_feat = IS_HOG_FEAT)
print(time.time()-t, 'Seconds to compute features...')
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=rand_state)
    print('Using:', ORIENTATION, 'orientations', PIX_PER_CELL, 'pixels per cell and', CELL_PER_BLOCK, 'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
SVC = LinearSVC()
# Check the training time for the SVC
SVC.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(SVC.score(X_test, y_test), 4))
clip1 = VideoFileClip('../project_video.mp4')
video_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
video_output = '../output_videos/project_video.mp4'
|
apache-2.0
| -4,546,993,512,947,049,500
| 43.27044
| 128
| 0.566199
| false
| 3.54931
| true
| false
| false
|
Poofjunior/dxf2gcode
|
core/shape.py
|
1
|
23737
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2008-2015
# Christian Kohlöffel
# Vinzenz Schulz
# Jean-Paul Schouwstra
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from __future__ import division
from math import radians, pi
from copy import deepcopy
import logging
import globals.globals as g
from core.point import Point
from core.linegeo import LineGeo
from core.arcgeo import ArcGeo
from core.holegeo import HoleGeo
from globals.six import text_type
import globals.constants as c
if c.PYQT5notPYQT4:
from PyQt5 import QtCore
else:
from PyQt4 import QtCore
logger = logging.getLogger("Core.Shape")
class Shape(object):
"""
The Shape Class includes all plotting, GUI functionality and export functions
related to the Shapes.
"""
# only need default arguments here because of the change of usage with super in QGraphicsItem
def __init__(self, nr=-1, closed=True, parentEntity=None):
if nr == -1:
return
self.type = "Shape"
self.nr = nr
self.closed = closed
self.cut_cor = 40
self.parentEntity = parentEntity
self.parentLayer = None
self.geos = Geos([])
self.cw = True
self.stmove = None
self.topLeft = None
self.bottomRight = None
self.send_to_TSP = g.config.vars.Route_Optimisation['default_TSP']
self.selected = False
self.disabled = False
self.allowedToChange = True
# preset defaults
self.axis3_start_mill_depth = g.config.vars.Depth_Coordinates['axis3_start_mill_depth']
self.axis3_slice_depth = g.config.vars.Depth_Coordinates['axis3_slice_depth']
self.axis3_mill_depth = g.config.vars.Depth_Coordinates['axis3_mill_depth']
self.f_g1_plane = g.config.vars.Feed_Rates['f_g1_plane']
self.f_g1_depth = g.config.vars.Feed_Rates['f_g1_depth']
# Parameters for drag knife
self.drag_angle = radians(g.config.vars.Drag_Knife_Options['drag_angle'])
# Parameters for laser cutter
self.laser_power = g.config.vars.Laser_Cutter_Options['laser_power']
self.laser_pulses_per_mm = g.config.vars.Laser_Cutter_Options['laser_pulses_per_mm']
def __str__(self):
"""
Standard method to print the object
@return: A string
"""
return "\ntype: %s" % self.type +\
"\nnr: %i" % self.nr +\
"\nclosed: %i" % self.closed +\
"\ncut_cor: %s" % self.cut_cor +\
"\nlen(geos): %i" % len(self.geos) +\
"\ngeos: %s" % self.geos
def tr(self, string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return text_type(QtCore.QCoreApplication.translate("Shape",
string_to_translate))
def setSelected(self, flag=False):
self.selected = flag
def isSelected(self):
return self.selected
def setDisable(self, flag=False):
self.disabled = flag
def isDisabled(self):
return self.disabled
def setToolPathOptimized(self, flag=False):
self.send_to_TSP = flag
def isToolPathOptimized(self):
return self.send_to_TSP
def isDirectionOfGeosCCW(self, geos):
# By calculating the area of the shape
start = geos.abs_el(0).get_start_end_points(True)
summe = 0.0
for geo in geos.abs_iter():
if isinstance(geo, LineGeo):
end = geo.get_start_end_points(False)
summe += (start.x + end.x) * (end.y - start.y)
start = end
elif isinstance(geo, ArcGeo):
segments = 10
for i in range(1, segments + 1):
end = geo.get_point_from_start(i, segments)
summe += (end.x + start.x) * (end.y - start.y)
start = end
if not self.closed:
# if shape is not closed... simply treat it as closed
end = geos.abs_el(0).get_start_end_points(True)
summe += (end.x + start.x) * (end.y - start.y)
if summe == 0: # inconclusive
logger.debug(self.tr("Shoelace method cannot (directly) be applied to this shape"))
            # let's take it clockwise with relation to the workpiece zero
start = geos.abs_el(0).get_start_end_points(True)
# get the farthest end point with relation to the start
end = start
distance2 = 0
for geo in geos.abs_iter():
pos_end = geo.get_start_end_points(False)
pos_distance2 = (start - pos_end).length_squared()
if pos_distance2 > distance2:
end = pos_end
distance2 = pos_distance2
direction = start.to3D().cross_product(end.to3D()).z
if -1e-5 < direction < 1e-5: # start and end are aligned wrt to wp zero
direction = start.length_squared() - end.length_squared()
summe = direction
return summe > 0.0
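    # --- Editor's worked example (added note, not part of the original file) ---
    # Shoelace sum for a unit square traversed CCW, (0,0)->(1,0)->(1,1)->(0,1):
    # (0+1)*(0-0) + (1+1)*(1-0) + (1+0)*(1-1) + (0+0)*(0-1) = 2 > 0,
    # i.e. twice the enclosed area, so isDirectionOfGeosCCW returns True.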
def AnalyseAndOptimize(self):
self.setNearestStPoint(Point())
logger.debug(self.tr("Analysing the shape for CW direction Nr: %s" % self.nr))
if self.isDirectionOfGeosCCW(self.geos):
self.reverse()
logger.debug(self.tr("Had to reverse the shape to be CW"))
self.cw = True
def setNearestStPoint(self, stPoint):
if self.closed:
logger.debug(self.tr("Clicked Point: %s" % stPoint))
start = self.get_start_end_points(True)
logger.debug(self.tr("Old Start Point: %s" % start))
min_geo_nr, _ = min(enumerate(self.geos.abs_iter()),
key=lambda geo: geo[1].get_start_end_points(True).distance(stPoint))
# Overwrite the geometries in changed order.
self.geos = Geos(self.geos[min_geo_nr:] + self.geos[:min_geo_nr])
start = self.get_start_end_points(True)
logger.debug(self.tr("New Start Point: %s" % start))
def reverse(self, geos=None):
if not geos:
geos = self.geos
geos.reverse()
for geo in geos:
geo.reverse()
self.cw = not self.cw
def switch_cut_cor(self):
"""
Switches the cutter direction between 41 and 42.
G41 = Tool radius compensation left.
G42 = Tool radius compensation right
"""
if self.cut_cor == 41:
self.cut_cor = 42
elif self.cut_cor == 42:
self.cut_cor = 41
def append(self, geo):
geo.make_abs_geo(self.parentEntity)
self.geos.append(geo)
def get_start_end_points_physical(self, start_point=None, angles=None):
"""
        With multiple slices the end point could coincide with the start point,
        e.g. useful for computing the optimal route etc.
"""
if start_point or self.closed:
return self.get_start_end_points(start_point, angles)
else:
max_slice = max(self.axis3_slice_depth, self.axis3_mill_depth - self.axis3_start_mill_depth)
if max_slice == 0:
end_should_be_start = True
else:
end_should_be_start = (self.axis3_start_mill_depth - self.axis3_mill_depth) // max_slice % 2 == 0
if not end_should_be_start:
return self.get_start_end_points(start_point, angles)
else:
start_stuff = self.get_start_end_points(True, angles)
if angles is False:
end_stuff = start_stuff[0], -start_stuff[1]
else:
end_stuff = start_stuff
if start_point is None:
return start_stuff, end_stuff
else:
return end_stuff
def get_start_end_points(self, start_point=None, angles=None):
if start_point is None:
return (self.geos.abs_el(0).get_start_end_points(True, angles),
self.geos.abs_el(-1).get_start_end_points(False, angles))
elif start_point:
return self.geos.abs_el(0).get_start_end_points(True, angles)
else:
return self.geos.abs_el(-1).get_start_end_points(False, angles)
def make_path(self, drawHorLine, drawVerLine):
for geo in self.geos.abs_iter():
drawVerLine(self, geo.get_start_end_points(True))
geo.make_path(self, drawHorLine)
if self.topLeft is None:
self.topLeft = deepcopy(geo.topLeft)
self.bottomRight = deepcopy(geo.bottomRight)
else:
self.topLeft.detTopLeft(geo.topLeft)
self.bottomRight.detBottomRight(geo.bottomRight)
if not self.closed:
drawVerLine(self, geo.get_start_end_points(False))
def isHit(self, xy, tol):
if self.topLeft.x - tol <= xy.x <= self.bottomRight.x + tol\
and self.bottomRight.y - tol <= xy.y <= self.topLeft.y + tol:
for geo in self.geos.abs_iter():
if geo.isHit(self, xy, tol):
return True
return False
def Write_GCode_for_geo(self, geo, PostPro):
        # Skip zero-length geos; otherwise such arcs could be emitted as a full circle
post_dec = PostPro.vars.Number_Format["post_decimals"]
if isinstance(geo, HoleGeo) or\
round(geo.Ps.x, post_dec) != round(geo.Pe.x, post_dec) or\
round(geo.Ps.y, post_dec) != round(geo.Pe.y, post_dec) or\
isinstance(geo, ArcGeo) and geo.length > 0.5 * 0.1 ** post_dec * pi:
return geo.Write_GCode(PostPro)
else:
return ""
def Write_GCode(self, PostPro):
"""
This method returns the string to be exported for this shape, including
the defined start and end move of the shape.
@param PostPro: this is the Postprocessor class including the methods
to export
"""
if g.config.machine_type == 'drag_knife':
return self.Write_GCode_Drag_Knife(PostPro)
elif g.config.machine_type == 'laser_cutter':
return self.Write_GCode_Laser_Cutter(PostPro)
prv_cut_cor = self.cut_cor
if self.cut_cor != 40 and not g.config.vars.Cutter_Compensation["done_by_machine"]:
self.cut_cor = 40
new_geos = Geos(self.stmove.geos[1:])
else:
new_geos = self.geos
new_geos = PostPro.breaks.getNewGeos(new_geos)
# initialisation of the string
exstr = ""
# Get the mill settings defined in the GUI
safe_retract_depth = self.parentLayer.axis3_retract
safe_margin = self.parentLayer.axis3_safe_margin
max_slice = self.axis3_slice_depth
workpiece_top_Z = self.axis3_start_mill_depth
# We want to mill the piece, even for the first pass, so remove one "slice"
initial_mill_depth = workpiece_top_Z - abs(max_slice)
depth = self.axis3_mill_depth
f_g1_plane = self.f_g1_plane
f_g1_depth = self.f_g1_depth
# Save the initial Cutter correction in a variable
has_reversed = False
        # If the output format is DXF do not perform more than one cut.
if PostPro.vars.General["output_type"] == 'dxf':
depth = max_slice
if max_slice == 0:
logger.error(self.tr("ERROR: Z infeed depth is null!"))
if initial_mill_depth < depth:
logger.warning(self.tr(
"WARNING: initial mill depth (%i) is lower than end mill depth (%i). Using end mill depth as final depth.") % (
initial_mill_depth, depth))
# Do not cut below the depth.
initial_mill_depth = depth
mom_depth = initial_mill_depth
# Move the tool to the start.
exstr += self.stmove.geos.abs_el(0).Write_GCode(PostPro)
        # Add the string to be emitted before the shape is cut.
exstr += PostPro.write_pre_shape_cut()
# Cutter radius compensation when G41 or G42 is on, AND cutter compensation option is set to be done outside the piece
if self.cut_cor != 40 and PostPro.vars.General["cc_outside_the_piece"]:
exstr += PostPro.set_cut_cor(self.cut_cor)
exstr += PostPro.chg_feed_rate(f_g1_plane)
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
exstr += self.stmove.geos.abs_el(2).Write_GCode(PostPro)
exstr += PostPro.rap_pos_z(
workpiece_top_Z + abs(safe_margin)) # Compute the safe margin from the initial mill depth
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
# Cutter radius compensation when G41 or G42 is on, AND cutter compensation option is set to be done inside the piece
if self.cut_cor != 40 and not PostPro.vars.General["cc_outside_the_piece"]:
exstr += PostPro.set_cut_cor(self.cut_cor)
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
exstr += self.stmove.geos.abs_el(2).Write_GCode(PostPro)
# Write the geometries for the first cut
for geo in new_geos.abs_iter():
exstr += self.Write_GCode_for_geo(geo, PostPro)
        # Turning off the cutter radius compensation if requested
if self.cut_cor != 40 and PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# Numbers of loops
snr = 0
# Loops for the number of cuts
while mom_depth > depth and max_slice != 0.0:
snr += 1
mom_depth = mom_depth - abs(max_slice)
if mom_depth < depth:
mom_depth = depth
            # Plunge back in to the new depth
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
# If it is not a closed contour
if not self.closed:
self.reverse(new_geos)
self.switch_cut_cor()
has_reversed = not has_reversed # switch the "reversed" state (in order to restore it at the end)
# If cutter radius compensation is turned on. Turn it off - because some interpreters cannot handle
# a switch
if self.cut_cor != 40 and not PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# If cutter correction is enabled
if self.cut_cor != 40 and PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.set_cut_cor(self.cut_cor)
for geo in new_geos.abs_iter():
exstr += self.Write_GCode_for_geo(geo, PostPro)
# Turning off the cutter radius compensation if needed
if self.cut_cor != 40 and PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# Do the tool retraction
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(workpiece_top_Z + abs(safe_margin))
exstr += PostPro.rap_pos_z(safe_retract_depth)
# If cutter radius compensation is turned on.
if self.cut_cor != 40 and not PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# Initial value of direction restored if necessary
if has_reversed:
self.reverse(new_geos)
self.switch_cut_cor()
self.cut_cor = prv_cut_cor
        # Add the string to be emitted after the shape has been cut.
exstr += PostPro.write_post_shape_cut()
return exstr
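    # --- Editor's worked example (added note, not part of the original file) ---
    # With axis3_start_mill_depth = 0, axis3_slice_depth = -3 and
    # axis3_mill_depth = -9, Write_GCode emits the first cut at Z = -3 and the
    # while-loop above then re-plunges to -6 and finally to -9.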
def Write_GCode_Drag_Knife(self, PostPro):
"""
This method returns the string to be exported for this shape, including
the defined start and end move of the shape. This function is used for
Drag Knife cutting machine only.
@param PostPro: this is the Postprocessor class including the methods
to export
"""
# initialisation of the string
exstr = ""
# Get the mill settings defined in the GUI
safe_retract_depth = self.parentLayer.axis3_retract
safe_margin = self.parentLayer.axis3_safe_margin
workpiece_top_Z = self.axis3_start_mill_depth
f_g1_plane = self.f_g1_plane
f_g1_depth = self.f_g1_depth
"""
Cutting in slices is not supported for Swivel Knife tool. All is cut at once.
"""
mom_depth = self.axis3_mill_depth
drag_depth = self.axis3_slice_depth
# Move the tool to the start.
exstr += self.stmove.geos.abs_el(0).Write_GCode(PostPro)
        # Add the string to be emitted before the shape is cut.
exstr += PostPro.write_pre_shape_cut()
# Move into workpiece and start cutting into Z
exstr += PostPro.rap_pos_z(
workpiece_top_Z + abs(safe_margin)) # Compute the safe margin from the initial mill depth
exstr += PostPro.chg_feed_rate(f_g1_depth)
# Write the geometries for the first cut
if isinstance(self.stmove.geos.abs_el(1), ArcGeo):
if self.stmove.geos.abs_el(1).drag:
exstr += PostPro.lin_pol_z(drag_depth)
drag = True
else:
exstr += PostPro.lin_pol_z(mom_depth)
drag = False
else:
exstr += PostPro.lin_pol_z(mom_depth)
drag = False
exstr += PostPro.chg_feed_rate(f_g1_plane)
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
for geo in Geos(self.stmove.geos[2:]).abs_iter():
if isinstance(geo, ArcGeo):
if geo.drag:
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(drag_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
drag = True
elif drag:
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
drag = False
elif drag:
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
drag = False
exstr += self.Write_GCode_for_geo(geo, PostPro)
# Do the tool retraction
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(workpiece_top_Z + abs(safe_margin))
exstr += PostPro.rap_pos_z(safe_retract_depth)
        # Add the string to be emitted after the shape has been cut.
exstr += PostPro.write_post_shape_cut()
return exstr
def Write_GCode_Laser_Cutter(self, PostPro):
"""
This method returns the string to be exported for this shape, including
the defined start and end move of the shape.
@param PostPro: this is the Postprocessor class including the methods
to export
"""
# Save prior machine state.
prv_cut_cor = self.cut_cor
if self.cut_cor != 40 and not g.config.vars.Cutter_Compensation["done_by_machine"]:
new_geos = Geos(self.stmove.geos[1:])
else:
new_geos = self.geos
new_geos = PostPro.breaks.getNewGeos(new_geos)
# Initialize string to hold all the GCode.
exstr = ""
laser_disable_depth = 0
laser_enable_depth = -0.01
# Save the initial Cutter correction in a variable
has_reversed = False
# Move the tool to the start.
exstr += self.stmove.geos.abs_el(0).Write_GCode(PostPro)
exstr += PostPro.rap_pos_z(laser_disable_depth)
        # Add the string to be emitted before the shape is cut.
exstr += PostPro.write_pre_shape_cut()
        # Enable the laser by dropping Z just below zero (laser_enable_depth)
exstr += PostPro.rap_pos_z(laser_enable_depth)
# Set the feed rate.
exstr += PostPro.chg_feed_rate(self.f_g1_plane)
if self.cut_cor != 40 and g.config.vars.Cutter_Compensation["done_by_machine"]:
# Enable Cutter Compensation at the start of all shapes.
exstr += PostPro.set_cut_cor(self.cut_cor)
# Apply Lead-In move for all shapes.
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
exstr += self.stmove.geos.abs_el(2).Write_GCode(PostPro)
# Set the desired laser power.
exstr += PostPro.chg_laser_power(self.laser_power)
# Set the desired laser pulses per mm.
exstr += PostPro.chg_laser_pulses_per_mm(self.laser_pulses_per_mm)
# Write the geometries for the cut.
for geo in new_geos.abs_iter():
exstr += self.Write_GCode_for_geo(geo, PostPro)
# Turn off the cutter radius compensation if enabled.
if self.cut_cor != 40 and g.config.vars.Cutter_Compensation["done_by_machine"]:
exstr += PostPro.deactivate_cut_cor()
# Disable Laser by Restore Z to (non-negative value) 0
exstr += PostPro.rap_pos_z(laser_disable_depth)
# Initial value of direction restored if necessary
if has_reversed:
self.reverse(new_geos)
self.switch_cut_cor()
self.cut_cor = prv_cut_cor
        # Add the string to be emitted after cutting all shapes.
exstr += PostPro.write_post_shape_cut()
return exstr
class Geos(list):
def __init__(self, *args):
list.__init__(self, *args)
def abs_iter(self):
        for geo in list.__iter__(self):
            yield geo.abs_geo if geo.abs_geo else geo
def abs_el(self, element):
return self[element].abs_geo if self[element].abs_geo else self[element]
|
gpl-3.0
| 24,751,064,062,946,144
| 36.975369
| 127
| 0.56686
| false
| 3.694319
| true
| false
| false
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/tensorflow/contrib/libsvm/ops/gen_libsvm_ops.py
|
1
|
6994
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: libsvm_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
_decode_libsvm_outputs = ["label", "feature_indices", "feature_values",
"feature_shape"]
_DecodeLibsvmOutput = _collections.namedtuple(
"DecodeLibsvm", _decode_libsvm_outputs)
@tf_export('decode_libsvm')
def decode_libsvm(input, num_features, dtype=_dtypes.float32, label_dtype=_dtypes.int64, name=None):
r"""Convert LibSVM input to tensors. The output consists of
a label and a feature tensor. The shape of the label tensor
is the same as input and the shape of the feature tensor is
`[input_shape, num_features]`.
Args:
input: A `Tensor` of type `string`. Each string is a record in the LibSVM.
num_features: An `int` that is `>= 1`. The number of features.
dtype: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.float32`.
label_dtype: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (label, feature_indices, feature_values, feature_shape).
label: A `Tensor` of type `label_dtype`. A tensor of the same shape as input.
feature_indices: A `Tensor` of type `int64`. A 2-D int64 tensor of dense_shape [N, ndims].
feature_values: A `Tensor` of type `dtype`. A 1-D tensor of any type and dense_shape [N].
feature_shape: A `Tensor` of type `int64`. A 1-D int64 tensor of dense_shape [ndims].
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
num_features = _execute.make_int(num_features, "num_features")
if dtype is None:
dtype = _dtypes.float32
dtype = _execute.make_type(dtype, "dtype")
if label_dtype is None:
label_dtype = _dtypes.int64
label_dtype = _execute.make_type(label_dtype, "label_dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"DecodeLibsvm", input=input, num_features=num_features, dtype=dtype,
label_dtype=label_dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "label_dtype",
_op.get_attr("label_dtype"), "num_features",
_op.get_attr("num_features"))
_execute.record_gradient(
"DecodeLibsvm", _inputs_flat, _attrs, _result, name)
_result = _DecodeLibsvmOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "DecodeLibsvm",
name, _ctx._post_execution_callbacks, input, "dtype", dtype,
"label_dtype", label_dtype, "num_features", num_features)
_result = _DecodeLibsvmOutput._make(_result)
return _result
except _core._FallbackException:
return decode_libsvm_eager_fallback(
input, dtype=dtype, label_dtype=label_dtype,
num_features=num_features, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def decode_libsvm_eager_fallback(input, num_features, dtype=_dtypes.float32, label_dtype=_dtypes.int64, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function decode_libsvm
"""
_ctx = ctx if ctx else _context.context()
num_features = _execute.make_int(num_features, "num_features")
if dtype is None:
dtype = _dtypes.float32
dtype = _execute.make_type(dtype, "dtype")
if label_dtype is None:
label_dtype = _dtypes.int64
label_dtype = _execute.make_type(label_dtype, "label_dtype")
input = _ops.convert_to_tensor(input, _dtypes.string)
_inputs_flat = [input]
_attrs = ("dtype", dtype, "label_dtype", label_dtype, "num_features",
num_features)
_result = _execute.execute(b"DecodeLibsvm", 4, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DecodeLibsvm", _inputs_flat, _attrs, _result, name)
_result = _DecodeLibsvmOutput._make(_result)
return _result
_ops.RegisterShape("DecodeLibsvm")(None)
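# --- Editor's illustrative sketch (added, not part of the generated file) ---
# Decodes a single LibSVM-formatted record; the helper name, the record string
# and num_features=10 are made-up illustration values.
def _demo_decode_libsvm():
    record = ["1 1:3.0 4:-1.5"]
    label, indices, values, shape = decode_libsvm(record, num_features=10)
    return label, indices, values, shape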
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "DecodeLibsvm"
# input_arg {
# name: "input"
# type: DT_STRING
# }
# output_arg {
# name: "label"
# type_attr: "label_dtype"
# }
# output_arg {
# name: "feature_indices"
# type: DT_INT64
# }
# output_arg {
# name: "feature_values"
# type_attr: "dtype"
# }
# output_arg {
# name: "feature_shape"
# type: DT_INT64
# }
# attr {
# name: "dtype"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "label_dtype"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "num_features"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\311\001\n\014DecodeLibsvm\022\t\n\005input\030\007\032\024\n\005label\"\013label_dtype\032\023\n\017feature_indices\030\t\032\027\n\016feature_values\"\005dtype\032\021\n\rfeature_shape\030\t\"\033\n\005dtype\022\004type\032\0020\001:\010\n\0062\004\001\002\003\t\"!\n\013label_dtype\022\004type\032\0020\t:\010\n\0062\004\001\002\003\t\"\027\n\014num_features\022\003int(\0010\001")
|
mit
| 5,052,128,959,870,387,000
| 36.005291
| 420
| 0.659708
| false
| 3.115367
| false
| false
| false
|
demvher/pythondotorg
|
jobs/views.py
|
1
|
8432
|
from braces.views import LoginRequiredMixin, GroupRequiredMixin
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, TemplateView, View
from .forms import JobForm
from .models import Job, JobType, JobCategory
class JobBoardAdminRequiredMixin(GroupRequiredMixin):
group_required = "Job Board Admin"
class JobMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
active_locations = Job.objects.visible().distinct(
'location_slug'
).order_by(
'location_slug',
)
context.update({
'jobs_count': Job.objects.visible().count(),
'active_types': JobType.objects.with_active_jobs(),
'active_categories': JobCategory.objects.with_active_jobs(),
'active_locations': active_locations,
})
return context
class JobList(JobMixin, ListView):
model = Job
paginate_by = 25
job_list_view = True
def get_queryset(self):
return super().get_queryset().visible().select_related()
class JobListMine(JobMixin, ListView):
model = Job
paginate_by = 25
def get_queryset(self):
queryset = super().get_queryset()
if self.request.user.is_authenticated():
q = Q(creator=self.request.user)
else:
raise Http404
return queryset.filter(q)
class JobTypeMenu:
def job_type_view(self):
return True
class JobCategoryMenu:
def job_category_view(self):
return True
class JobLocationMenu:
def job_location_view(self):
return True
class JobListType(JobTypeMenu, JobList):
template_name = 'jobs/job_type_list.html'
def get_queryset(self):
return super().get_queryset().filter(job_types__slug=self.kwargs['slug'])
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['current_type'] = JobType.objects.get(slug=self.kwargs['slug'])
return context
class JobListCategory(JobCategoryMenu, JobList):
template_name = 'jobs/job_category_list.html'
def get_queryset(self):
return super().get_queryset().filter(category__slug=self.kwargs['slug'])
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['current_category'] = JobCategory.objects.get(slug=self.kwargs['slug'])
return context
class JobListLocation(JobLocationMenu, JobList):
template_name = 'jobs/job_location_list.html'
def get_queryset(self):
return super().get_queryset().filter(location_slug=self.kwargs['slug'])
class JobTypes(JobTypeMenu, JobMixin, ListView):
""" View to simply list JobType instances that have current jobs """
template_name = "jobs/job_types.html"
queryset = JobType.objects.with_active_jobs().order_by('name')
context_object_name = 'types'
class JobCategories(JobCategoryMenu, JobMixin, ListView):
""" View to simply list JobCategory instances that have current jobs """
template_name = "jobs/job_categories.html"
queryset = JobCategory.objects.with_active_jobs().order_by('name')
context_object_name = 'categories'
class JobLocations(JobLocationMenu, JobMixin, TemplateView):
""" View to simply list distinct Countries that have current jobs """
template_name = "jobs/job_locations.html"
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['jobs'] = Job.objects.visible().distinct(
'country', 'city'
).order_by(
'country', 'city'
)
return context
class JobReview(LoginRequiredMixin, JobBoardAdminRequiredMixin, JobMixin, ListView):
template_name = 'jobs/job_review.html'
paginate_by = 20
def get_queryset(self):
return Job.objects.review()
def post(self, request):
try:
job = Job.objects.get(id=request.POST['job_id'])
action = request.POST['action']
except (KeyError, Job.DoesNotExist):
return redirect('jobs:job_review')
if action == 'approve':
job.approve(request.user)
messages.add_message(self.request, messages.SUCCESS, "'%s' approved." % job)
elif action == 'reject':
job.reject(request.user)
messages.add_message(self.request, messages.SUCCESS, "'%s' rejected." % job)
elif action == 'remove':
job.status = Job.STATUS_REMOVED
job.save()
messages.add_message(self.request, messages.SUCCESS, "'%s' removed." % job)
elif action == 'archive':
job.status = Job.STATUS_ARCHIVED
job.save()
messages.add_message(self.request, messages.SUCCESS, "'%s' removed." % job)
return redirect('jobs:job_review')
class JobDetail(JobMixin, DetailView):
model = Job
def get_queryset(self):
""" Show only approved jobs to the public, staff can see all jobs """
qs = Job.objects.select_related()
if self.request.user.is_staff:
return qs
else:
return qs.visible()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(
category_jobs=self.object.category.jobs.select_related('company__name')[:5],
user_can_edit=(self.object.creator == self.request.user)
)
ctx.update(kwargs)
return ctx
class JobDetailReview(LoginRequiredMixin, JobBoardAdminRequiredMixin, JobDetail):
def get_queryset(self):
""" Only staff and creator can review """
if self.request.user.is_staff:
return Job.objects.select_related()
else:
raise Http404()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(
user_can_edit=(
self.object.creator == self.request.user
or self.request.user.is_staff
),
under_review=True,
)
ctx.update(kwargs)
return ctx
class JobCreate(JobMixin, CreateView):
model = Job
form_class = JobForm
def get_success_url(self):
return reverse('jobs:job_thanks')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
if self.request.user.is_authenticated():
kwargs['initial'] = {'email': self.request.user.email}
return kwargs
def form_valid(self, form):
""" set the creator to the current user """
# Associate Job to user if they are logged in
if self.request.user.is_authenticated():
form.instance.creator = self.request.user
return super().form_valid(form)
class JobEdit(JobMixin, UpdateView):
model = Job
form_class = JobForm
def get_queryset(self):
if not self.request.user.is_authenticated():
raise Http404
if self.request.user.is_staff:
return super().get_queryset()
return self.request.user.jobs_job_creator.all()
def form_valid(self, form):
""" set last_modified_by to the current user """
form.instance.last_modified_by = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(
form_action='update',
)
ctx.update(kwargs)
return ctx
class JobChangeStatus(LoginRequiredMixin, JobMixin, View):
"""
    Abstract class to change a job's status; see the concrete implementations below.
"""
def post(self, request, pk):
job = get_object_or_404(self.request.user.jobs_job_creator, pk=pk)
job.status = self.new_status
job.save()
messages.add_message(self.request, messages.SUCCESS, self.success_message)
return redirect('job_detail', job.id)
class JobPublish(JobChangeStatus):
new_status = Job.STATUS_APPROVED
success_message = 'Your job listing has been published.'
class JobArchive(JobChangeStatus):
new_status = Job.STATUS_ARCHIVED
success_message = 'Your job listing has been archived and is no longer public.'
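# --- Editor's illustrative sketch (added, not part of the original views) ---
# The JobChangeStatus pattern only needs a status constant and a message; a
# hypothetical removal view (not wired into any URLconf) would look like this:
class JobRemoveExample(JobChangeStatus):
    new_status = Job.STATUS_REMOVED
    success_message = 'Your job listing has been removed.'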
|
apache-2.0
| -7,756,300,799,939,174,000
| 29.330935
| 97
| 0.635674
| false
| 3.940187
| false
| false
| false
|
Winnetou/ManuTironis
|
utils/ngram_getter.py
|
1
|
1156
|
# this one doesn't crawl the web, it just and only
# takes xml from
import os
import psycopg2
import uni_to_beta
import xml
import xmlrpc
connect_manu = psycopg2.connect("dbname=manu_tironis user=quellen password=quellen")
manu_cursor = connect_manu.cursor()
def get_raw_text():
pass
def translate(raw_text):
uni_to_beta.st(raw_text)
pass
def get_trigrams(translated):
"""
:param translated: str
:return:
"""
tokens = translated.split()
trigrams = []
for index, word in enumerate(tokens[:-2]):
trigram = " ".join(tokens[index], tokens[index + 1], tokens[index + 2])
trigrams.append(trigram)
# and now save /the planet:/ by calling bulk save
# FIXME FINISH ME!!
manu_cursor.executemany('insert into trigrams values (%s')
def main():
for directory in os.listdir('here'):
for subdir in directory:
for file in subdir:
if file.endswith("_gk.xml"):
xml = file.open.read()
raw_text = get_raw_text(xml)
translated = translate(raw_text)
get_trigrams(translated)
|
cc0-1.0
| -7,046,498,592,810,975,000
| 24.130435
| 84
| 0.605536
| false
| 3.658228
| false
| false
| false
|
docwalter/py3status
|
py3status/modules/uptime.py
|
1
|
4335
|
# -*- coding: utf-8 -*-
"""
Display system uptime.
Configuration parameters:
format: display format for this module
(default 'up {days} days {hours} hours {minutes} minutes')
Format placeholders:
{decades} decades
{years} years
{weeks} weeks
{days} days
{hours} hours
{minutes} minutes
{seconds} seconds
Note: If you don't use one of the placeholders, the value will be carried over
to the next unit. For example, given an uptime of 1h 30min:
If you use {minutes} as your only placeholder, then its value will be 90.
    If you use {hours} and {minutes}, then their values will be 1 and 30, respectively.
Examples:
```
# show uptime without zeroes
uptime {
format = 'up [\?if=weeks {weeks} weeks ][\?if=days {days} days ]
[\?if=hours {hours} hours ][\?if=minutes {minutes} minutes ]'
}
# show uptime in multiple formats using group module
group uptime {
format = "up {output}"
uptime {
format = '[\?if=weeks {weeks} weeks ][\?if=days {days} days ]
[\?if=hours {hours} hours ][\?if=minutes {minutes} minutes]'
}
uptime {
format = '[\?if=weeks {weeks}w ][\?if=days {days}d ]
[\?if=hours {hours}h ][\?if=minutes {minutes}m]'
}
uptime {
format = '[\?if=days {days}, ][\?if=hours {hours}:]
[\?if=minutes {minutes:02d}]'
}
}
```
@author Alexis "Horgix" Chotard <alexis.horgix.chotard@gmail.com>, tobes, lasers
@license BSD
SAMPLE OUTPUT
{'full_text': 'up 1 days 18 hours 20 minutes'}
"""
from time import time
class Py3status:
"""
"""
# available configuration parameters
format = 'up {days} days {hours} hours {minutes} minutes'
def post_config_hook(self):
self._decades = self.py3.format_contains(self.format, 'decades')
self._years = self.py3.format_contains(self.format, 'years')
self._weeks = self.py3.format_contains(self.format, 'weeks')
self._days = self.py3.format_contains(self.format, 'days')
self._hours = self.py3.format_contains(self.format, 'hours')
self._minutes = self.py3.format_contains(self.format, 'minutes')
self._seconds = self.py3.format_contains(self.format, 'seconds')
def uptime(self):
# Units will be computed from bare seconds since timedelta only
# provides .days and .seconds anyway. Getting rid of the seconds
# part. Keeping the floating point part would make divmod return
# floats, and thus would require days/hours/minutes/seconds to be
# casted to int before formatting, which would be dirty to handle
# since we can't cast None to int.
with open('/proc/uptime', 'r') as f:
up = int(float(f.readline().split()[0]))
offset = time() - up
cache_timeout = decades = years = weeks = days = hours = minutes = seconds = 0
# Decades
if self._decades:
decades, up = divmod(up, 315360000) # 10 years -> decade
cache_timeout = 315360000
# Years
if self._years:
years, up = divmod(up, 31536000) # 365 days -> year
cache_timeout = 31536000
# Weeks
if self._weeks:
weeks, up = divmod(up, 604800) # 7 days -> week
cache_timeout = 604800
# Days
if self._days:
days, up = divmod(up, 86400) # 24 hours -> day
cache_timeout = 86400
# Hours
if self._hours:
hours, up = divmod(up, 3600) # 60 minutes -> hour
cache_timeout = 3600
# Minutes
if self._minutes:
minutes, up = divmod(up, 60) # 60 seconds -> minute
cache_timeout = 60
# Seconds
if self._seconds:
seconds = up # 1000000000 nanoseconds -> second
cache_timeout = 1
uptime = self.py3.safe_format(self.format, dict(
decades=decades, years=years, weeks=weeks, days=days,
hours=hours, minutes=minutes, seconds=seconds))
return {
'cached_until': self.py3.time_in(sync_to=cache_timeout, offset=offset),
'full_text': uptime
}
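    # --- Editor's worked example (added note, not part of the original module) ---
    # With format 'up {hours} hours {minutes} minutes' and up = 5400 seconds:
    #   hours, up = divmod(5400, 3600)  -> hours = 1, up = 1800
    #   minutes, up = divmod(1800, 60)  -> minutes = 30
    # so the output is "up 1 hours 30 minutes", matching the docstring note about
    # values carrying over to the next requested unit.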
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
bsd-3-clause
| -947,488,740,994,104,300
| 32.091603
| 86
| 0.58985
| false
| 3.606489
| false
| false
| false
|
miracle2k/onkyo-eiscp
|
setup.py
|
1
|
1149
|
#!/usr/bin/env python
# coding: utf8
from setuptools import setup, find_packages
# Get long_description from README
import os
here = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(here, 'README.rst'))
long_description = f.read().strip()
f.close()
setup(
name='onkyo-eiscp',
version='1.2.8',
url='https://github.com/miracle2k/onkyo-eiscp',
license='MIT',
author='Michael Elsdörfer',
author_email='michael@elsdoerfer.com',
description='Control Onkyo receivers over ethernet.',
long_description=long_description,
packages = find_packages(exclude=('tests*',)),
entry_points="""[console_scripts]\nonkyo = eiscp.script:run\n""",
install_requires=['docopt>=0.4.1', 'netifaces', 'xmltodict>=0.12.0'],
platforms='any',
classifiers=[
'Topic :: System :: Networking',
'Topic :: Games/Entertainment',
'Topic :: Multimedia',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
mit
| 784,736,929,684,786,300
| 30.888889
| 73
| 0.641986
| false
| 3.437126
| false
| true
| false
|
abztrakt/labtracker
|
Machine/migrations/0006_auto__add_field_item_unusable__chg_field_item_mac3__chg_field_item_mac.py
|
1
|
12585
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Item.unusable'
db.add_column('Machine_item', 'unusable', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Changing field 'Item.mac3'
db.alter_column('Machine_item', 'mac3', self.gf('Machine.models.MacField')())
# Changing field 'Item.mac2'
db.alter_column('Machine_item', 'mac2', self.gf('Machine.models.MacField')())
# Changing field 'Item.mac1'
db.alter_column('Machine_item', 'mac1', self.gf('Machine.models.MacField')())
def backwards(self, orm):
# Deleting field 'Item.unusable'
db.delete_column('Machine_item', 'unusable')
# Changing field 'Item.mac3'
db.alter_column('Machine_item', 'mac3', self.gf('django.db.models.fields.CharField')(max_length=17))
# Changing field 'Item.mac2'
db.alter_column('Machine_item', 'mac2', self.gf('django.db.models.fields.CharField')(max_length=17))
# Changing field 'Item.mac1'
db.alter_column('Machine_item', 'mac1', self.gf('django.db.models.fields.CharField')(max_length=17))
models = {
'LabtrackerCore.group': {
'Meta': {'object_name': 'Group'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2616'}),
'group_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'it': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.InventoryType']", 'null': 'True', 'blank': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['LabtrackerCore.Item']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'LabtrackerCore.inventorytype': {
'Meta': {'object_name': 'InventoryType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2616'}),
'inv_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'namespace': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'LabtrackerCore.item': {
'Meta': {'object_name': 'Item'},
'it': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.InventoryType']"}),
'item_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'LabtrackerCore.labuser': {
'Meta': {'object_name': 'LabUser'},
'accesses': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
},
'Machine.contact': {
'Meta': {'object_name': 'Contact'},
'contact_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'Machine.group': {
'Meta': {'object_name': 'Group', '_ormbases': ['LabtrackerCore.Group']},
'casting_server': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'core': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['LabtrackerCore.Group']", 'unique': 'True', 'primary_key': 'True'}),
'gateway': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_lab': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'Machine.history': {
'Meta': {'object_name': 'History'},
'login_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Item']"}),
'mh_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ms': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['Machine.Status']", 'null': 'True', 'blank': 'True'}),
'session_time': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.LabUser']"})
},
'Machine.item': {
'Meta': {'object_name': 'Item', '_ormbases': ['LabtrackerCore.Item']},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'core': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['LabtrackerCore.Item']", 'unique': 'True', 'primary_key': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Location']"}),
'mac1': ('Machine.models.MacField', [], {}),
'mac2': ('Machine.models.MacField', [], {'blank': 'True'}),
'mac3': ('Machine.models.MacField', [], {'blank': 'True'}),
'manu_tag': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'machine_status'", 'symmetrical': 'False', 'to': "orm['Machine.Status']"}),
'stf_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Type']"}),
'unusable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uw_tag': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wall_port': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'warranty_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'Machine.location': {
'Meta': {'object_name': 'Location'},
'building': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '600'}),
'floor': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ml_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'room': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'usable_threshold': ('django.db.models.fields.IntegerField', [], {'default': '95'})
},
'Machine.platform': {
'Meta': {'object_name': 'Platform'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'platform_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'Machine.status': {
'Meta': {'unique_together': "(('ms_id', 'name'),)", 'object_name': 'Status'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'ms_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'Machine.type': {
'Meta': {'object_name': 'Type'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'mt_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'platform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Platform']"}),
'specs': ('django.db.models.fields.TextField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Machine']
|
apache-2.0
| 8,021,627,425,522,244,000
| 68.916667
| 182
| 0.548669
| false
| 3.649942
| false
| false
| false
|
opennode/waldur-mastermind
|
src/waldur_mastermind/marketplace_checklist/admin.py
|
1
|
1428
|
from django.contrib import admin
from import_export import admin as import_export_admin
from modeltranslation import admin as modeltranslation_admin
from . import models
from .import_export_resources import ChecklistResource
class CategoryAdmin(import_export_admin.ImportExportModelAdmin):
fields = ('icon', 'name', 'description')
class QuestionInline(modeltranslation_admin.TranslationStackedInline):
model = models.Question
fields = ('order', 'description', 'solution', 'correct_answer', 'category', 'image')
class ChecklistCustomerRoleInline(admin.StackedInline):
model = models.ChecklistCustomerRole
fields = ('role',)
class ChecklistProjectRoleInline(admin.StackedInline):
model = models.ChecklistProjectRole
fields = ('role',)
class ChecklistAdmin(
import_export_admin.ImportExportMixin, modeltranslation_admin.TranslationAdmin
):
inlines = [QuestionInline, ChecklistCustomerRoleInline, ChecklistProjectRoleInline]
list_display = ('name', 'description', 'category', 'uuid')
list_filter = ('category',)
fields = ('name', 'description', 'category')
resource_class = ChecklistResource
class AnswerAdmin(admin.ModelAdmin):
list_display = ('user', 'question', 'value')
list_filter = ('question',)
admin.site.register(models.Checklist, ChecklistAdmin)
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Answer, AnswerAdmin)
|
mit
| 3,977,542,510,639,444,000
| 30.043478
| 88
| 0.757003
| false
| 4.08
| false
| false
| false
|
cylc/cylc
|
cylc/flow/wallclock.py
|
1
|
10726
|
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Wall clock related utilities."""
from calendar import timegm
from datetime import datetime, timedelta
from metomi.isodatetime.timezone import (
get_local_time_zone_format, get_local_time_zone, TimeZoneFormatMode)
DATE_TIME_FORMAT_BASIC = "%Y%m%dT%H%M%S"
DATE_TIME_FORMAT_BASIC_SUB_SECOND = "%Y%m%dT%H%M%S.%f"
DATE_TIME_FORMAT_EXTENDED = "%Y-%m-%dT%H:%M:%S"
DATE_TIME_FORMAT_EXTENDED_SUB_SECOND = "%Y-%m-%dT%H:%M:%S.%f"
_FLAGS = {r'utc_mode': False}
RE_DATE_TIME_FORMAT_EXTENDED = (
r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:Z|[+-][\d:]+)?")
TIME_FORMAT_BASIC = "%H%M%S"
TIME_FORMAT_BASIC_SUB_SECOND = "%H%M%S.%f"
TIME_FORMAT_EXTENDED = "%H:%M:%S"
TIME_FORMAT_EXTENDED_SUB_SECOND = "%H:%M:%S.%f"
TIME_ZONE_STRING_LOCAL_BASIC = get_local_time_zone_format(
TimeZoneFormatMode.reduced)
TIME_ZONE_STRING_LOCAL_EXTENDED = get_local_time_zone_format(
TimeZoneFormatMode.extended)
TIME_ZONE_STRING_UTC = "Z"
TIME_ZONE_UTC_UTC_OFFSET = (0, 0)
TIME_ZONE_LOCAL_UTC_OFFSET = get_local_time_zone()
TIME_ZONE_LOCAL_UTC_OFFSET_HOURS = TIME_ZONE_LOCAL_UTC_OFFSET[0]
TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES = TIME_ZONE_LOCAL_UTC_OFFSET[1]
TIME_ZONE_LOCAL_INFO = {
"hours": TIME_ZONE_LOCAL_UTC_OFFSET[0],
"minutes": TIME_ZONE_LOCAL_UTC_OFFSET[1],
"string_basic": TIME_ZONE_STRING_LOCAL_BASIC,
"string_extended": TIME_ZONE_STRING_LOCAL_EXTENDED
}
TIME_ZONE_UTC_INFO = {
"hours": TIME_ZONE_UTC_UTC_OFFSET[0],
"minutes": TIME_ZONE_UTC_UTC_OFFSET[1],
"string_basic": TIME_ZONE_STRING_UTC,
"string_extended": TIME_ZONE_STRING_UTC
}
PARSER = None
def get_utc_mode():
"""Return value of UTC mode."""
return _FLAGS['utc_mode']
def set_utc_mode(mode):
"""Set value of UTC mode."""
_FLAGS['utc_mode'] = bool(mode)
def now(override_use_utc=None):
"""Return a current-time datetime.datetime and a UTC timezone flag.
Keyword arguments:
override_use_utc (default None) - a boolean (or None) that, if
True, gives the date and time in UTC. If False, it gives the date
and time in the local time zone. If None, the _FLAGS['utc_mode'] boolean is
used.
"""
if override_use_utc or (override_use_utc is None and _FLAGS['utc_mode']):
return datetime.utcnow(), False
else:
return datetime.now(), True
def get_current_time_string(display_sub_seconds=False, override_use_utc=None,
use_basic_format=False):
"""Return a string representing the current system time.
Keyword arguments:
display_sub_seconds (default False) - a boolean that, if True,
switches on microsecond reporting
override_use_utc (default None) - a boolean (or None) that, if
True, switches on utc time zone reporting. If False, it switches
off utc time zone reporting (even if _FLAGS['utc_mode'] is True). If None,
the _FLAGS['utc_mode'] boolean is used.
use_basic_format (default False) - a boolean that, if True,
represents the date/time without "-" or ":" delimiters. This is
most useful for filenames where ":" may cause problems.
"""
date_time, date_time_is_local = now(override_use_utc=override_use_utc)
return get_time_string(date_time, display_sub_seconds=display_sub_seconds,
override_use_utc=override_use_utc,
date_time_is_local=date_time_is_local,
use_basic_format=use_basic_format)
def get_time_string(date_time, display_sub_seconds=False,
override_use_utc=None, use_basic_format=False,
date_time_is_local=False, custom_time_zone_info=None):
"""Return a string representing the current system time.
Arguments:
date_time - a datetime.datetime object.
Keyword arguments:
display_sub_seconds (default False) - a boolean that, if True,
switches on microsecond reporting
override_use_utc (default None) - a boolean (or None) that, if
True, switches on utc time zone reporting. If False, it switches
off utc time zone reporting (even if _FLAGS['utc_mode'] is True). If None,
the _FLAGS['utc_mode'] boolean is used.
use_basic_format (default False) - a boolean that, if True,
represents the date/time without "-" or ":" delimiters. This is
most useful for filenames where ":" may cause problems.
date_time_is_local - a boolean that, if True, indicates that
the date_time argument object is in the local time zone, not UTC.
    custom_time_zone_info (default None) - a dictionary that enforces
    a particular time zone. It looks like {"hours": _hours,
    "minutes": _minutes, "string_basic": _basic, "string_extended":
    _extended} where _hours and _minutes are the hours and minutes offset
    from UTC and _basic/_extended are the basic and extended format
    strings to use as the time zone designator (the one read depends on
    use_basic_format).
"""
time_zone_string = None
if custom_time_zone_info is not None:
custom_hours = custom_time_zone_info["hours"]
custom_minutes = custom_time_zone_info["minutes"]
if use_basic_format:
custom_string = custom_time_zone_info["string_basic"]
else:
custom_string = custom_time_zone_info["string_extended"]
if date_time_is_local:
date_time_hours = TIME_ZONE_LOCAL_UTC_OFFSET_HOURS
date_time_minutes = TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES
else:
date_time_hours, date_time_minutes = (0, 0)
diff_hours = custom_hours - date_time_hours
diff_minutes = custom_minutes - date_time_minutes
date_time = date_time + timedelta(
hours=diff_hours, minutes=diff_minutes)
time_zone_string = custom_string
elif override_use_utc or (override_use_utc is None and _FLAGS['utc_mode']):
time_zone_string = TIME_ZONE_STRING_UTC
if date_time_is_local:
date_time = date_time - timedelta(
hours=TIME_ZONE_LOCAL_UTC_OFFSET_HOURS,
minutes=TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES
)
else:
if use_basic_format:
time_zone_string = TIME_ZONE_STRING_LOCAL_BASIC
else:
time_zone_string = TIME_ZONE_STRING_LOCAL_EXTENDED
if not date_time_is_local:
diff_hours = TIME_ZONE_LOCAL_UTC_OFFSET_HOURS
diff_minutes = TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES
date_time = date_time + timedelta(
hours=diff_hours, minutes=diff_minutes)
if use_basic_format:
date_time_format_string = DATE_TIME_FORMAT_BASIC
if display_sub_seconds:
date_time_format_string = DATE_TIME_FORMAT_BASIC_SUB_SECOND
else:
date_time_format_string = DATE_TIME_FORMAT_EXTENDED
if display_sub_seconds:
date_time_format_string = DATE_TIME_FORMAT_EXTENDED_SUB_SECOND
date_time_string = date_time.strftime(date_time_format_string)
return date_time_string + time_zone_string
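# A minimal illustrative custom_time_zone_info value (offset chosen arbitrarily;
# the keys mirror TIME_ZONE_LOCAL_INFO / TIME_ZONE_UTC_INFO defined above):
#   {"hours": 5, "minutes": 30, "string_basic": "+0530", "string_extended": "+05:30"}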
def get_time_string_from_unix_time(unix_time, display_sub_seconds=False,
use_basic_format=False,
custom_time_zone_info=None):
"""Convert a unix timestamp into a local time zone datetime.datetime.
Arguments:
unix_time - an integer or float number of seconds since the Unix
epoch.
Keyword arguments:
display_sub_seconds (default False) - a boolean that, if True,
switches on microsecond reporting
use_basic_format (default False) - a boolean that, if True,
represents the date/time without "-" or ":" delimiters. This is
most useful for filenames where ":" may cause problems.
    custom_time_zone_info (default None) - a dictionary that enforces
    a particular time zone. It looks like {"hours": _hours,
    "minutes": _minutes, "string_basic": _basic, "string_extended":
    _extended} where _hours and _minutes are the hours and minutes offset
    from UTC and _basic/_extended are the basic and extended format
    strings to use as the time zone designator (the one read depends on
    use_basic_format).
"""
date_time = datetime.utcfromtimestamp(unix_time)
return get_time_string(date_time,
display_sub_seconds=display_sub_seconds,
use_basic_format=use_basic_format,
override_use_utc=None,
date_time_is_local=False,
custom_time_zone_info=custom_time_zone_info)
def get_unix_time_from_time_string(datetime_string):
"""Convert a datetime string into a unix timestamp.
The datetime_string must match DATE_TIME_FORMAT_EXTENDED above,
which is the extended ISO 8601 year-month-dayThour:minute:second format,
plus a valid ISO 8601 time zone. For example, 2016-09-07T11:21:00+01:00,
2016-12-25T06:00:00Z, or 2016-12-25T06:00:00+13.
isodatetime is not used to do the whole parsing, partly for performance,
but mostly because the calendar may be in non-Gregorian mode.
"""
try:
date_time_utc = datetime.strptime(
datetime_string, DATE_TIME_FORMAT_EXTENDED + "Z")
except ValueError:
global PARSER
if PARSER is None:
from metomi.isodatetime.parsers import TimePointParser
PARSER = TimePointParser()
time_zone_info = PARSER.get_info(datetime_string)[1]
time_zone_hour = int(time_zone_info["time_zone_hour"])
time_zone_minute = int(time_zone_info.get("time_zone_minute", 0))
offset_seconds = 3600 * time_zone_hour + 60 * time_zone_minute
if "+" in datetime_string:
datetime_string = datetime_string.split("+")[0]
else:
datetime_string = datetime_string.rsplit("-", 1)[0]
date_time = datetime.strptime(
datetime_string, DATE_TIME_FORMAT_EXTENDED)
date_time_utc = date_time - timedelta(seconds=offset_seconds)
return timegm(date_time_utc.timetuple())
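# Illustrative round trip using the example strings from the docstring above:
#   get_unix_time_from_time_string("2016-12-25T06:00:00Z") returns the POSIX
#   timestamp of that instant; get_time_string_from_unix_time() converts such a
#   timestamp back into a formatted date-time string.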
def get_seconds_as_interval_string(seconds):
"""Convert a number of seconds into an ISO 8601 duration string."""
from metomi.isodatetime.data import Duration
return str(Duration(seconds=seconds, standardize=True))
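# For example, get_seconds_as_interval_string(3661) yields an ISO 8601 duration
# string such as "PT1H1M1S" (illustrative; the exact rendering is delegated to
# the isodatetime Duration class).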
|
gpl-3.0
| -7,570,600,737,711,770,000
| 40.573643
| 79
| 0.658214
| false
| 3.592096
| false
| false
| false
|
openweave/happy
|
happy/HappyProcessStart.py
|
1
|
15877
|
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
#    Implements the HappyProcessStart class that starts processes within virtual nodes.
#
# Process runs a command in a virtual node, which itself
# is a logical representation of a network namespace.
#
from __future__ import absolute_import
import os
import subprocess
import sys
import time
import psutil
import warnings
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.HappyNode import HappyNode
from happy.HappyProcess import HappyProcess
import happy.HappyProcessStop
options = {}
options["quiet"] = False
options["node_id"] = None
options["tag"] = None
options["command"] = None
options["strace"] = False
options["env"] = {}
options["sync_on_output"] = None
options["rootMode"] = False
def option():
return options.copy()
class HappyProcessStart(HappyNode, HappyProcess):
"""
Starts a happy process.
happy-process-start [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-t --tag <DAEMON_NAME>] [-s --strace]
[-e --env <ENVIRONMENT>] <COMMAND>
-i --id Optional. Node on which to run the process. Find using
happy-node-list or happy-state.
-t --tag Required. Name of the process.
-s --strace Optional. Enable strace output for the process.
-e --env Optional. An environment variable to pass to the node
for use by the process.
<COMMAND> Required. The command to run as process <DAEMON_NAME>.
Example:
$ happy-process-start BorderRouter ContinuousPing ping 127.0.0.1
Starts a process within the BorderRouter node called ContinuousPing
that runs "ping 127.0.0.1" continuously.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNode.__init__(self)
HappyProcess.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tag = opts["tag"]
self.command = opts["command"]
self.strace = opts["strace"]
self.env = opts["env"]
self.sync_on_output = opts["sync_on_output"]
self.output_fileput_suffix = ".out"
self.strace_suffix = ".strace"
self.rootMode = opts["rootMode"]
def __stopProcess(self):
emsg = "Process %s stops itself." % (self.tag)
self.logger.debug("[%s] daemon [%s]: %s" % (self.node_id, self.tag, emsg))
options = happy.HappyProcessStop.option()
options["node_id"] = self.node_id
options["tag"] = self.tag
options["quiet"] = self.quiet
stopProcess = happy.HappyProcessStop.HappyProcessStop(options)
stopProcess.run()
self.readState()
def __pre_check(self):
# Check if the new process is given
if not self.tag:
emsg = "Missing name of the new process to start."
self.logger.error("[localhost] HappyProcessStart: %s" % (emsg))
self.exit()
        # Check that the name of the new process is not a duplicate (that it does not already exist).
if self.processExists(self.tag):
emsg = "virtual process %s already exist." % (self.tag)
self.logger.info("[%s] HappyProcessStart: %s" % (self.node_id, emsg))
self.__stopProcess()
# Check if the process command is given
if not self.command:
emsg = "Missing process command."
self.logger.error("[localhost] HappyProcessStart: %s" % (emsg))
self.exit()
timeStamp = "%010.6f" % time.time()
pid = "%06d" % os.getpid()
emsg = "Tag: %s PID: %s timeStamp : %s" % (self.tag, pid, timeStamp)
self.logger.debug("[%s] HappyProcessStart: %s" % (self.node_id, emsg))
self.output_file = self.process_log_prefix + pid + \
"_" + timeStamp + "_" + self.tag + self.output_fileput_suffix
self.strace_file = self.process_log_prefix + pid + \
"_" + timeStamp + "_" + self.tag + self.strace_suffix
def __poll_for_output(self):
poll_interval_sec = 0.01
max_poll_time_sec = 180
time_slept = 0
tail = open(self.output_file, "r")
self.logger.debug("[%s] HappyProcessStart: polling for output: %s" % (self.node_id, self.sync_on_output))
while (True):
line = tail.readline()
if not line:
time.sleep(poll_interval_sec)
time_slept += poll_interval_sec
poll_interval_sec *= 2
if (time_slept > max_poll_time_sec):
self.logger.debug("[%s] HappyProcessStart: can't find the output requested: %s" %
(self.node_id, self.sync_on_output))
raise RuntimeError("Can't find the output requested")
elif self.sync_on_output in line:
self.logger.debug("[%s] HappyProcessStart: found output: %s in %s secs" %
(self.node_id, self.sync_on_output, str(time_slept)))
break
else:
continue
tail.close()
return
def __start_daemon(self):
cmd = self.command
# We need to support 8 combinations:
# Who: user or root
# strace: yes or not
# env: yes or not
# Given this script called sayhello.sh:
# #!/bin/bash
# echo Hello ${USER}!
# echo You passed the following opts $1, $2, $3
# echo MYENVVAR is $MYENVVAR
# a successful run with an environment variable prints:
# Hello andreello!
# You passed the following opts a, b, c
# MYENVVAR is hello
        # The goal is to use the simplest command line possible; in particular, we don't
# want to call sudo unless strictly necessary (for performance reasons).
# Here is how the CLI looks like if you use "ip netns exec" directly:
# user without env:
# sudo ip netns exec happy000 sudo -u andreello ./sayhello.sh a b c
# user with env:
# sudo ip netns exec happy000 sudo -u andreello MYENVVAR=hello ./sayhello.sh a b c
# root without env:
# ip netns exec happy000 ./sayhello.sh a b c
# root with env
# ip netns exec happy000 bash -c 'MYENVVAR=hello ./sayhello.sh a b c'
# user with strace, without env
# sudo ip netns exec happy000 sudo -u andreello strace -tt -o strace.out ./sayhello.sh a b c
# user with strace, with env
# sudo ip netns exec happy000 sudo -u andreello strace -tt -o strace.out -E MYENVVAR=hello ./sayhello.sh a b c
# root with strace, without env
# ip netns exec happy000 strace -tt -o strace.out ./sayhello.sh a b c
# root with strace, with env
# ip netns exec happy000 strace -tt -o strace.out -E MYENVVAR=hello ./sayhello.sh a b c
# Highlights:
# - to pass environment variables, either 'strace -E' or 'bash -c'
# - but, 'bash -c' requires the command to be in one string, while 'strace -E' requires the opposite
# - the examples above show the argument to 'bash -c' in quotes, but they are not necessary when passing
# the list of strings to Popen()
# - also, the examples above show only one env var; if passing more than one to strace, they need to have
# a '-E' each
# In summary, it's easier to build the cmd as a full string, and then split it the right way depending
# on strace vs bash.
# Here are a few examples of how the string is split into a list:
#
# user without env:
# ./bin/happy-process-start.py -i node01 -t HELLO ./sayhello.sh a b c
# [u'sudo', u'ip', u'netns', u'exec', u'happy000', u'sudo', u'-u', u'andreello', u'./sayhello.sh', u'a', u'b', u'c']
#
# user with env:
# ./bin/happy-process-start.py -i node01 -e "MYENVVAR=hello" -t HELLO ./sayhello.sh a b c
# [u'sudo', u'ip', u'netns', u'exec', u'happy000', u'sudo', u'-u', u'andreello',
# u'MYENVVAR=hello', u'./sayhello.sh', u'a', u'b', u'c']
#
# root without env:
# sudo ./bin/happy-process-start.py -i node01 -t HELLO ./sayhello.sh a b c
# [u'ip', u'netns', u'exec', u'happy000', u'./sayhello.sh', u'a', u'b', u'c']
#
# user with env and strace:
# ./bin/happy-process-start.py -i node01 -e "MYENVVAR=hello" -s -t HELLO ./sayhello.sh a b c
# [u'sudo', u'ip', u'netns', u'exec', u'happy000', u'sudo', u'-u', u'andreello', u'strace', u'-tt', u'-o',
# u'/tmp/happy_..._HELLO.strace', u'-E', u'MYENVVAR=hello', u'./sayhello.sh', u'a', u'b', u'c']
#
# root with env:
# [u'ip', u'netns', u'exec', u'happy000', 'bash', '-c', u' MYENVVAR=hello ./sayhello.sh a b c']
#
# root with strace no env:
# sudo ./bin/happy-process-start.py -i node01 -s -t HELLO ./sayhello.sh a b c
#
# root with strace and env:
# [u'ip', u'netns', u'exec', u'happy000', u'strace', u'-tt', u'-o', u'/tmp/happy_..._HELLO.strace',
# u'-E', u'MYENVVAR=hello', u'./sayhello.sh', u'a', u'b', u'c']
need_internal_sudo = False
if os.getuid() != 0:
need_internal_sudo = True
if "sudo" in cmd.split():
# The command already has the inner sudo; typical case is that
# a normal user started Happy, and the script needs to run
# a command in a node as root. If sudo is for root, remove it.
# TODO: properly support "sudo -u" with strace
cmd = self.stripRunAsRoot(cmd)
need_internal_sudo = False
env_vars_list = []
cmd_list_prefix = []
need_bash = False
if "bash -c" in cmd:
tmp = cmd.split("bash -c")
need_bash = True
cmd_list_prefix = tmp[0].split()
cmd = tmp[1]
for key, value in self.env.items():
tmp = ""
try:
tmp = "" + key + "=" + value
env_vars_list.append(tmp)
except:
self.logger.error("Failed to serialize environment variable %s" % (key));
self.logger.debug("HappyProcessStart with env: > %s" % (env_vars_list))
if self.strace:
cmd_list_prefix = ["strace", "-tt", "-o", self.strace_file] + cmd_list_prefix
tmp = []
for i in env_vars_list:
tmp.append("-E")
tmp.append(i)
env_vars_list = tmp
elif need_internal_sudo:
pass
elif len(env_vars_list):
need_bash = True
if need_internal_sudo:
if self.rootMode:
tmp = self.getRunAsRootPrefixList()
else:
tmp = self.getRunAsUserPrefixList()
cmd_list_prefix = tmp + cmd_list_prefix
if self.node_id:
cmd_list_prefix = ["ip", "netns", "exec", self.uniquePrefix(self.node_id)] + cmd_list_prefix
cmd_list_prefix = self.getRunAsRootPrefixList() + cmd_list_prefix
try:
self.fout = open(self.output_file, "wb", 0)
except Exception:
emsg = "Failed to open file %s." % (self.output_file)
self.logger.error("[%s] HappyProcessStart: %s." % (self.node_id, emsg))
self.exit()
self.logger.debug("HappyProcessStart: > %s" % (cmd))
popen = None
try:
cmd_list = []
if need_bash:
env_vars_list = []
for key, value in self.env.items():
tmp = ""
try:
tmp = "" + key + '="' + value.replace('\\','\\\\').replace('"','\\"') +'"'
env_vars_list.append(tmp)
except:
self.logger.error("Failed to serialize environment variable %s" % (key));
cmd = " ".join(env_vars_list) + ' ' + cmd
cmd_list = cmd_list_prefix + ["bash", "-c", cmd]
else:
cmd_list = cmd_list_prefix + env_vars_list + cmd.split()
self.logger.debug("[%s] HappyProcessStart: executing command list %s" % (self.node_id, cmd_list))
popen = subprocess.Popen(cmd_list, stdin=subprocess.PIPE, stdout=self.fout)
self.child_pid = popen.pid
emsg = "running daemon %s (PID %d)" % (self.tag, self.child_pid)
self.logger.debug("[%s] HappyProcessStart: %s" % (self.node_id, emsg))
# The following is guaranteed to fetch info about the right process (i.e. the PID has
# no chance of being reused) because even if the child process terminates right away, it'll stay
# around in <defunct> until the popen object has been destroyed or popen.poll() has
# been called.
p = psutil.Process(self.child_pid)
# At python.psutil 2.0.0, create_time changed from a data
# member to a member function. Try to access the modern member
# function first. If that throws, try the old data member.
try:
self.create_time = p.create_time()
except Exception:
self.create_time = p.create_time
emsg = "Create time: " + str(self.create_time)
self.logger.debug("[%s] HappyProcessStart: %s." % (self.node_id, emsg))
if self.sync_on_output:
self.__poll_for_output()
except Exception as e:
if popen:
# We need to kill the process tree; if popen succeeded,
# we assume we were also able to get the create_time
self.TerminateProcessTree(popen.pid, self.create_time)
emsg = "Starting process with command %s FAILED with %s." % (cmd, str(e))
self.logger.error("[%s] HappyProcessStart: %s." % (self.node_id, emsg))
self.exit()
def __post_check(self):
pass
def __update_state(self):
emsg = "Update State with tag %s running command: %s" % \
(self.tag, self.command)
self.logger.debug("[%s] HappyProcessStart: %s ." % (self.node_id, emsg))
new_process = {}
new_process["pid"] = self.child_pid
new_process["out"] = self.output_file
new_process["strace"] = self.strace_file
new_process["command"] = self.command
new_process["create_time"] = self.create_time
self.setNodeProcess(new_process, self.tag, self.node_id)
self.writeState()
def run(self):
with self.getStateLockManager():
self.readState()
self.__pre_check()
self.__start_daemon()
self.__update_state()
self.__post_check()
return ReturnMsg(0)
|
apache-2.0
| 3,041,653,095,024,345,000
| 38.009828
| 124
| 0.552749
| false
| 3.652404
| false
| false
| false
|
recombinators/worker
|
models.py
|
1
|
9335
|
import os
import transaction
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, UnicodeText, Boolean, DateTime
from datetime import datetime
import requests
mailgun_key = os.environ['MAILGUN_KEY']
mailgun_url = os.environ['MAILGUN_URL']
DBSession = scoped_session(
sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
engine = create_engine(os.environ.get('DATABASE_URL'))
DBSession.configure(bind=engine)
Base.metadata.bind = engine
class WorkerLog(Base):
"""Model for the worker log."""
__tablename__ = 'worker_log'
id = Column(Integer, primary_key=True)
instanceid = Column(UnicodeText)
date_time = Column(DateTime)
statement = Column(UnicodeText)
value = Column(UnicodeText)
activitytype = Column(UnicodeText)
@classmethod
def log_entry(cls, instanceid, statement, value, activity_type):
current_time = datetime.utcnow()
entry = WorkerLog(instanceid=instanceid,
date_time=current_time,
statement=statement,
value=value,
activitytype=activity_type)
DBSession.add(entry)
transaction.commit()
class RenderCache_Model(Base):
"""
Model for the already rendered files.
"""
__tablename__ = 'render_cache'
id = Column(Integer, primary_key=True)
jobid = Column(Integer)
entityid = Column(UnicodeText)
band1 = Column(Integer)
band2 = Column(Integer)
band3 = Column(Integer)
previewurl = Column(UnicodeText)
renderurl = Column(UnicodeText)
rendercount = Column(Integer, default=0)
currentlyrend = Column(Boolean)
@classmethod
def add(cls, jobid, currentlyrend):
"""
        Method adds an entry into the db given a jobid and the currentlyrend flag.
"""
jobQuery = DBSession.query(UserJob_Model).get(jobid)
job = RenderCache_Model(entityid=jobQuery.entityid,
jobid=jobid,
band1=jobQuery.band1,
band2=jobQuery.band2,
band3=jobQuery.band3,
currentlyrend=currentlyrend)
DBSession.add(job)
transaction.commit()
@classmethod
def update(cls, jobid, currentlyrend, renderurl):
"""
Method updates entry into db given jobid and optional url.
"""
try:
DBSession.query(cls).filter(cls.jobid == jobid).update({
"currentlyrend": currentlyrend, "renderurl": renderurl})
transaction.commit()
except:
print 'Could not update database.'
@classmethod
def update_p_url(cls, scene, band1, band2, band3, previewurl):
"""
Method updates entry into db with preview url.
"""
# Convert parameters into correct type
band1, band2, band3 = int(band1), int(band2), int(band3)
previewurl = u'{}'.format(previewurl)
try:
entry = DBSession.query(cls).filter(cls.entityid == scene,
cls.band1 == band1,
cls.band2 == band2,
cls.band3 == band3).first()
# update entry if already exists,
# if there is no existing entry, add it.
if entry:
entry.update({"previewurl": previewurl})
transaction.commit()
else:
new = RenderCache_Model(entityid=scene,
band1=band1,
band2=band2,
band3=band3,
previewurl=previewurl
)
DBSession.add(new)
transaction.commit()
except:
print 'Could not add the preview URL to the database.'
class UserJob_Model(Base):
"""
Model for the user job queue. Possible job statuses:
status_key = {
0: "In queue",
1: "Downloading",
2: "Processing",
3: "Compressing",
4: "Uploading to server",
5: "Done",
10: "Failed"}
"""
__tablename__ = 'user_job'
jobid = Column(Integer, primary_key=True)
entityid = Column(UnicodeText)
userip = Column(UnicodeText)
email = Column(UnicodeText)
band1 = Column(Integer)
band2 = Column(Integer)
band3 = Column(Integer)
jobstatus = Column(Integer, nullable=False)
starttime = Column(DateTime, nullable=False)
lastmodified = Column(DateTime, nullable=False)
status1time = Column(DateTime)
status2time = Column(DateTime)
status3time = Column(DateTime)
status4time = Column(DateTime)
status5time = Column(DateTime)
status10time = Column(DateTime)
rendertype = Column(UnicodeText)
workerinstanceid = Column(UnicodeText)
@classmethod
def new_job(cls,
entityid=entityid,
band1=4,
band2=3,
band3=2,
jobstatus=0,
starttime=datetime.utcnow(),
rendertype=None
):
"""
Create a new job in the database.
"""
try:
session = DBSession
current_time = datetime.utcnow()
job = UserJob_Model(entityid=entityid,
band1=band1,
band2=band2,
band3=band3,
jobstatus=0,
starttime=current_time,
lastmodified=current_time,
rendertype=rendertype
)
session.add(job)
session.flush()
session.refresh(job)
pk = job.jobid
transaction.commit()
            # could do this or a subtransaction, i.e. open a transaction at the
# beginning of this method.
transaction.begin()
except:
return None
try:
RenderCache_Model.add(pk, True)
except:
print 'Could not add job to rendered db'
return pk
@classmethod
def set_job_status(cls, jobid, status, url=None):
"""
Set jobstatus for jobid passed in.
"""
table_key = {1: "status1time",
2: "status2time",
3: "status3time",
4: "status4time",
5: "status5time",
10: "status10time"}
try:
current_time = datetime.utcnow()
DBSession.query(cls).filter(cls.jobid == int(jobid)).update(
{"jobstatus": status,
table_key[int(status)]: current_time,
"lastmodified": current_time
})
transaction.commit()
except:
print 'Database write failed.'
# Tell render_cache db we have this image now
if int(status) == 5:
try:
RenderCache_Model.update(jobid, False, url)
except:
print 'Could not update Rendered db'
try:
cls.email_user(jobid)
except:
print 'Email failed'
@classmethod
def email_user(cls, jobid):
"""
If request contains email_address, send email to user with a link to
the full render zip file.
"""
job = DBSession.query(cls).filter(cls.jobid == int(jobid)).first()
email_address = job.email
if email_address:
bands = str(job.band1) + str(job.band2) + str(job.band3)
scene = job.entityid
full_render = ("http://snapsatcompositesjoel.s3.amazonaws.com/{}_bands"
"_{}.zip").format(scene, bands)
scene_url = 'http://snapsat.org/scene/{}#{}'.format(scene, bands)
request_url = 'https://api.mailgun.net/v2/{0}/messages'.format(
mailgun_url)
requests.post(request_url, auth=('api', mailgun_key),
data={
'from': 'no-reply@snapsat.org',
'to': email_address,
'subject': 'Snapsat has rendered your request',
'text': ("Thank you for using Snapsat.\nYour full composite is"
" available here:\n{}\nScene data can be found here:"
"\n{}\n\n-Snapsat.org").format(full_render, scene_url)
})
@classmethod
def set_worker_instance_id(cls, jobid, worker_instance_id):
"""
Set worker instance id for requested job to track which worker is doing
the job.
"""
try:
DBSession.query(cls).filter(cls.jobid == int(jobid)).update(
{"workerinstanceid": worker_instance_id})
transaction.commit()
except:
print 'database write failed'
|
mit
| 3,469,872,206,858,366,500
| 34.359848
| 83
| 0.532191
| false
| 4.470785
| false
| false
| false
|
bnrubin/userv
|
tests/test_encyclopedia.py
|
1
|
2665
|
from flask import request, url_for
from userv.encyclopedia.models import Fact
from pprint import pprint
from datetime import datetime, timezone
import arrow
import json
def test_factoids_all_one(session, db,app, client):
now = datetime.now(timezone.utc)
anow = arrow.get(now)
f = Fact(id=1, name='foo', author='Ben Franklin', popularity=42,
value="don't panic", added=now)
session.add(f)
#session.commit()
print(client.get('/api/v1/factoids/all').get_data())
response = client.get(url_for('encyclopedia.factoidsall')).json
expected = [
{'id': 1,
'name': 'foo',
'author': 'Ben Franklin',
'popularity': 42,
'value': "don't panic",
'added': str(anow)
}
]
assert response == expected
def test_factoids_all_many(session, db,app, client):
now = datetime.now(timezone.utc)
anow = arrow.get(now)
f = Fact(id=1, name='foo', author='Ben Franklin', popularity=42,
value="don't panic", added=now)
session.add(f)
f = Fact(id=2, name='bar', author='Alexander Hamilton', popularity=-1,
value='I am not giving away my shot', added=now)
session.add(f)
print(client.get('/api/v1/factoids/all').get_data())
response = client.get(url_for('encyclopedia.factoidsall')).json
expected = [
{'id': 1,
'name': 'foo',
'author': 'Ben Franklin',
'popularity': 42,
'value': "don't panic",
'added': str(anow)
},
{'id': 2,
'name': 'bar',
'author': 'Alexander Hamilton',
'popularity': -1,
'value': 'I am not giving away my shot',
'added': str(anow)
}
]
assert response == expected
def test_factoid_one(session, client):
now = datetime.now(timezone.utc)
anow = arrow.get(now)
f = Fact(id=1, name='foo', author='Ben Franklin', popularity=42,
value="don't panic", added=now)
session.add(f)
#session.commit()
print(client.get('/api/v1/factoids/fact/foo').get_data())
response = client.get(url_for('encyclopedia.factoidbyname', name='foo')).json
expected = {'id': 1,
'name': 'foo',
'author': 'Ben Franklin',
'popularity': 42,
'value': "don't panic",
'added': str(anow)
}
assert response == expected
|
mit
| 1,062,741,826,242,034,800
| 27.351064
| 81
| 0.512195
| false
| 3.660714
| false
| false
| false
|
RihardsT/forgettables
|
Languages_Programming/Python/python.py
|
1
|
8335
|
Simple Server
python3 -m http.server # 8000 # --bind 127.0.0.1
python2 -m SimpleHTTPServer # 8000
Indentation matters in Python.
#Single line comment
""" Multiline comment.
Apostrophes denote a string, just like quotes. The \ character lets you escape them.
There\'s a snake. is understandable to Python.
"""
Operators: = - * / ** % // #// floor divide
Comparators > < >= <= == !=
Assignment operators: += -= *= /=
Bool operators not and or #evaluated in this order. # 2<3 and 5<6 => True
Bitwise Operators: >> /Right shift << /Left shift & /Bitwise AND
| /Bitwise OR ^ /Bitwise XOR ~ /Bitwise NOT # & | return int, convert to binary with bin()
To write a number in binary, start with 0b #0b10 = 2, 0b11 = 3
#Python 2.* style
print "Life"+"of"+"Brian"+str(2) # +
name = "Name"
print "Hello %s" %(name) # %s and %(var) formatting operator.
print "String", var + 1 #var=0, prints String 1
print char, # , means it prints without the trailing \n
print a, b
#Python 3.* style
print('{0} and {1}'.format('var1', 'var2'))
# Python 3.6 String interpolation
f'can_put_text_here {variable}'
f'variable in brackets {{ {variable} }}'
# print array contents without brackets
print(*array, sep=', ')
Variables:
name = value #value can be anything: int, float, bool, array, string, obj
String: a= "string"[0] #Access by index.
String methods: len(variable) string.lower() .upper() str(var_to_string)
.isalpha() #Checks whether the string contains only letters
.split() #returns list with words
" ".join(list)
name = raw_input("Question") #Input konsolē.
name = input('Enter something')
list = [var1, var2] # Array
list[0] = changeVal
list[1:9:2] #list slicing [start:stop:step] [3:] [:2] [::2] / [::-1] #reverse #string slice, split
list.append(var) .insert(1,var) #.insert(position, var)
.sort() .index(var) #animals.index("bat") => returns index of bat
.pop(index) #Removes the element from the list and returns its value
.remove(item) #Removes the element if it is found.
del(list[index]) #like .pop, but does not return the value
evens_to_50 = [i for i in range(51) if i % 2 == 0] #generate list
dictionary = {'key':value, 'key2':value} # Hash in ruby
dictionary[key] = newValue
del dictionary[key]
# dictionaries have no .remove(); use del dictionary[key] or dictionary.pop(key)
.items() #returns key/value pairs, not ordered
.keys()
.values()
.clear()
.replace() # replace char in string
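#Minimal sketch of iterating a dict with the methods above (names are illustrative):
d = {'a': 1, 'b': 2}
for key, value in d.items():
    print(key, value)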
if/elif/else
if True:
#Do code
pass #does nothing
elif True:
#Else if code
else:
#Code
if var not in list:
    #add var to the list if it is not already in it.
if True: #code
'Yes' if fruit == 'Apple' else 'No' #value_when_true if condition else value_when_false
####for, for/else // while, while/else
for var in list_name:
    #code #Iterating a list this way, its values cannot be changed
else:
    #else runs only if the for loop completes normally, i.e. without a break
for key in dictionary:
print dictionary[key]
for i in range(0, 5): #for: from, to. Counts i. A typical for loop
    n[i] = n[i] * 2
#This iterates with indexes and allows changing the list values.
for index, item in enumerate(choices): #enumerate provides the index
print index+1, item
while True: #break can be used, effectively creating a do-while loop
#code
if True:
break
while True:
#code
else:
#Else condition
Built in functions:
range(stop) // range(start, stop) // range(start, stop, step)
max(1,2,3) min()
abs(-3) #absolute distance from 0, i.e. -3 => 3
sum()
type(var) #returns the type of var: int, float, str
len(var)
str(var_to_string)
float(var_to_float) # int to float
int(to_int)
zip(list_1, list_2) #zip pairs up the elements of two or more lists
filter(function_what_to_filter, object_to_filter) #See lambda
bin(1) #returns the binary string representation of an int; int(s, 2) converts back
oct()
hex()
int("number_in_string", base_of_that_number) #returns value of that in base 10
set(list_in_here) # Returns unique elements
map()
Functions:
def function_name(params):
#code
function_name(params) #Call function
Anonymous function
lambda x: x % 3 == 0
#same as:
def by_three(x):
return x % 3 == 0
languages = ["HTML", "JavaScript", "Python", "Ruby"]
print filter(lambda x: x == "Python" ,languages)
Classes:
class ClassName(object):
    member_variable = True #Available to any object of this class
    #Interestingly, the default value can be changed after an object has been created.
    def __init__(self, name):
        self.name = name #Instance variables. Each object can only access its own values
    def method_name(self): #self indicates the method is available on an individual object.
pass
def __repr__(self):
return "(%d, %d, %d)" %(self.x, self.y, self.z)
    #__repr__() determines how the object will be displayed. print my_object
class_object = ClassName("Name") #Creating an object
print class_object.name #Object variables can be accessed with a dot
class_object.member_variable = False #changes the default value.
#This does not change member_variable for the other objects. For them it is still the default.
Inheritance:
class ChildClass(ParentClass):
    #ParentClass functions etc. are available
    def method_name(self): #Override. Redefine with the same name as the ParentClass method.
        return super(Derived, self).method_name() #super gives access to the ParentClass ...
#CodeAcademy/Python/Introduction to Classes/14
class Employee(object):
def __init__(self, employee_name):
self.employee_name = employee_name
def calculate_wage(self, hours):
self.hours = hours
return hours * 20.00
class PartTimeEmployee(Employee):
def calculate_wage(self, hours):
self.hours = hours
return hours*12.00
def full_time_wage(self, hours):
return super(PartTimeEmployee, self).calculate_wage(hours)
milton = PartTimeEmployee("Milton")
print milton.full_time_wage(10)
############## FileInput/Output
### with is the prefered way how to deal with files. This takes care of open/close
# read line by line
with open("output.txt", "r") as f:
contents = f.read()
for line in f:
pass
### Open multiple files
with open('file1', 'w') as file1, open('file2', 'w') as file2:
pass
f = open("output.txt", "w")
#modes: "w" write only, "r" read only, "r+" read and write, "a" append
f.write("Data to be written")
print(f.read()) #Outputs everything
print(f.readline()) #First call: the first line
print(f.readline()) #Second call: the second line
f.close() #Must close the file.
f.closed #returns True/False. Tells whether the file is open or closed.
#You always need to close your files after you're done writing to them.
#During the I/O process, data is buffered: it is held in a temp before being written to the file.
#Python doesn't flush the buffer, write data to the file—until it's sure you're done writing.
#If you write to a file without closing, the data won't make it to the target file.
with open("file", "mode") as variable:
# Read or write to the file
with open("text.txt", "w") as textfile:
textfile.write("Success!")
import math #generic import. math has to be written before each of its functions. math.sqrt(9)
from module import function #function import
from module import * #universal imports. No need to write math. before each function
#Universal imports can cause problems if you write your own function with the same name.
#E.g. your own sqrt function would cause problems if you used from math import sqrt.
#If you use import math, then sqrt calls your own function, but math.sqrt calls the one from math.
import math # Imports the math module
everything = dir(math) # Sets everything to a list of things from math
print everything # Prints 'em all!
from datetime import datetime
print(datetime.now())
now = datetime.now()
print('{0}-{1}-{2}'.format(now.year, now.month, now.day))
from random import randint #Random int
import random
random.random() # float in range [0.0 1.0]
### string to date
import datetime
datetime.datetime.strptime(date_string, format)
# Format reference: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
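# Minimal sketch (value and format string chosen purely for illustration):
parsed = datetime.datetime.strptime('2017-05-04 13:00', '%Y-%m-%d %H:%M')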
# Get date month ago
time_now = datetime.datetime.utcnow()
time_30_days_ago = time_now - datetime.timedelta(days=30)
### compare dates. Replace tzinfo with None, if getting error:
# TypeError: can't compare offset-naive and offset-aware datetimes
some_date.replace(tzinfo=None) < time_30_days_ago
######
### Run system command
# https://docs.python.org/3/library/subprocess.html
import subprocess
subprocess.run(["COMMAND", "ARGUMENT"])
|
unlicense
| -7,686,488,076,607,635,000
| 33.894068
| 99
| 0.708682
| false
| 2.719617
| false
| false
| false
|
ninegrid/dotfiles-vim
|
bundle/vim-orgmode/ftplugin/orgmode/plugins/EditCheckbox.py
|
1
|
7316
|
# -*- coding: utf-8 -*-
import vim
from orgmode._vim import echo, echom, echoe, ORGMODE, apply_count, repeat, insert_at_cursor, indent_orgmode
from orgmode.menu import Submenu, Separator, ActionEntry, add_cmd_mapping_menu
from orgmode.keybinding import Keybinding, Plug, Command
from orgmode.liborgmode.checkboxes import Checkbox
from orgmode.liborgmode.dom_obj import OrderListType
class EditCheckbox(object):
u"""
Checkbox plugin.
"""
def __init__(self):
u""" Initialize plugin """
object.__init__(self)
# menu entries this plugin should create
self.menu = ORGMODE.orgmenu + Submenu(u'Edit Checkbox')
# key bindings for this plugin
# key bindings are also registered through the menu so only additional
# bindings should be put in this variable
self.keybindings = []
# commands for this plugin
self.commands = []
@classmethod
def new_checkbox(cls, below=None):
d = ORGMODE.get_document()
h = d.current_heading()
if h is None:
return
# init checkboxes for current heading
h.init_checkboxes()
c = h.current_checkbox()
nc = Checkbox()
nc._heading = h
# default checkbox level
level = h.level
start = vim.current.window.cursor[0] - 1
# if no checkbox is found, insert at current line with indent level=1
if c is None:
if h.checkboxes:
level = h.first_checkbox.level
h.checkboxes.append(nc)
else:
l = c.get_parent_list()
idx = c.get_index_in_parent_list()
if l is not None and idx is not None:
l.insert(idx + (1 if below else 0), nc)
# workaround for broken associations, Issue #165
nc._parent = c.parent
if below:
if c.next_sibling:
c.next_sibling._previous_sibling = nc
nc._next_sibling = c.next_sibling
c._next_sibling = nc
nc._previous_sibling = c
else:
if c.previous_sibling:
c.previous_sibling._next_sibling = nc
nc._next_sibling = c
nc._previous_sibling = c.previous_sibling
c._previous_sibling = nc
t = c.type
# increase key for ordered lists
if t[-1] in OrderListType:
try:
num = int(t[:-1]) + (1 if below else -1)
t = '%d%s' % (num, t[-1])
except ValueError:
try:
char = ord(t[:-1]) + (1 if below else -1)
t = '%s%s' % (chr(char), t[-1])
except ValueError:
pass
nc.type = t
if not c.status:
nc.status = None
level = c.level
if below:
start = c.end_of_last_child
else:
start = c.start
nc.level = level
vim.current.window.cursor = (start + 1, 0)
if below:
vim.command("normal o")
else:
vim.command("normal O")
insert_at_cursor(str(nc))
vim.command("call feedkeys('a')")
@classmethod
def toggle(cls, checkbox=None):
u"""
Toggle the checkbox given in the parameter.
If the checkbox is not given, it will toggle the current checkbox.
"""
d = ORGMODE.get_document()
current_heading = d.current_heading()
# init checkboxes for current heading
if current_heading is None:
return
current_heading = current_heading.init_checkboxes()
if checkbox is None:
# get current_checkbox
c = current_heading.current_checkbox()
# no checkbox found
if c is None:
cls.update_checkboxes_status()
return
else:
c = checkbox
if c.status == Checkbox.STATUS_OFF:
# set checkbox status on if all children are on
if not c.children or c.are_children_all(Checkbox.STATUS_ON):
c.toggle()
d.write_checkbox(c)
elif c.status == Checkbox.STATUS_ON:
if not c.children or c.is_child_one(Checkbox.STATUS_OFF):
c.toggle()
d.write_checkbox(c)
elif c.status == Checkbox.STATUS_INT:
# can't toggle intermediate state directly according to emacs orgmode
pass
# update checkboxes status
cls.update_checkboxes_status()
@classmethod
def _update_subtasks(cls):
d = ORGMODE.get_document()
h = d.current_heading()
# init checkboxes for current heading
h.init_checkboxes()
# update heading subtask info
c = h.first_checkbox
if c is None:
return
total, on = c.all_siblings_status()
h.update_subtasks(total, on)
# update all checkboxes under current heading
cls._update_checkboxes_subtasks(c)
@classmethod
def _update_checkboxes_subtasks(cls, checkbox):
# update checkboxes
for c in checkbox.all_siblings():
if c.children:
total, on = c.first_child.all_siblings_status()
c.update_subtasks(total, on)
cls._update_checkboxes_subtasks(c.first_child)
@classmethod
def update_checkboxes_status(cls):
d = ORGMODE.get_document()
h = d.current_heading()
# init checkboxes for current heading
h.init_checkboxes()
cls._update_checkboxes_status(h.first_checkbox)
cls._update_subtasks()
@classmethod
def _update_checkboxes_status(cls, checkbox=None):
u""" helper function for update checkboxes status
:checkbox: The first checkbox of this indent level
:return: The status of the parent checkbox
"""
if checkbox is None:
return
status_off, status_on, status_int, total = 0, 0, 0, 0
# update all top level checkboxes' status
for c in checkbox.all_siblings():
current_status = c.status
            # if this checkbox is not a leaf, its status is determined by all its children
if c.children:
current_status = cls._update_checkboxes_status(c.first_child)
# don't update status if the checkbox has no status
if c.status is None:
current_status = None
# the checkbox needs to have status
else:
total += 1
# count number of status in this checkbox level
if current_status == Checkbox.STATUS_OFF:
status_off += 1
elif current_status == Checkbox.STATUS_ON:
status_on += 1
elif current_status == Checkbox.STATUS_INT:
status_int += 1
# write status if any update
if current_status is not None and c.status != current_status:
c.status = current_status
d = ORGMODE.get_document()
d.write_checkbox(c)
parent_status = Checkbox.STATUS_INT
        # all sibling checkboxes are off status
if status_off == total:
parent_status = Checkbox.STATUS_OFF
        # all sibling checkboxes are on status
elif status_on == total:
parent_status = Checkbox.STATUS_ON
        # one sibling checkbox is on or int status
elif status_on != 0 or status_int != 0:
parent_status = Checkbox.STATUS_INT
# other cases
else:
parent_status = None
return parent_status
def register(self):
u"""
Registration of the plugin.
Key bindings and other initialization should be done here.
"""
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxNewAbove',
function=u':py ORGMODE.plugins[u"EditCheckbox"].new_checkbox()<CR>',
key_mapping=u'<localleader>cN',
menu_desrc=u'New CheckBox Above'
)
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxNewBelow',
function=u':py ORGMODE.plugins[u"EditCheckbox"].new_checkbox(below=True)<CR>',
key_mapping=u'<localleader>cn',
menu_desrc=u'New CheckBox Below'
)
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxToggle',
function=u':silent! py ORGMODE.plugins[u"EditCheckbox"].toggle()<CR>',
key_mapping=u'<localleader>cc',
menu_desrc=u'Toggle Checkbox'
)
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxUpdate',
function=u':silent! py ORGMODE.plugins[u"EditCheckbox"].update_checkboxes_status()<CR>',
key_mapping=u'<localleader>c#',
menu_desrc=u'Update Subtasks'
)
# vim: set noexpandtab:
|
unlicense
| -3,455,821,195,089,854,000
| 26.400749
| 107
| 0.6807
| false
| 3.077829
| false
| false
| false
|
juergspaak/EF-at-invariant-richness
|
plot_figS5.py
|
1
|
1315
|
"""
@author: J.W. Spaak
This program plots Fig. S5
"""
from plot_functions import bars
import community_construction_repl as repl
#compute DeltaEF/EF for the different communities and different cases
EF_data = {key: repl.delta_EF_lin(*repl.para[key]) for key in repl.para.keys()}
# species have different f
EF_data_dif = {key: repl.delta_EF_lin(*repl.para[key], sim_f = False)
for key in repl.para.keys()}
# plot results
keys = ['0.95', '0.75', '0.50','0.25', '0.05']
cols = ['#006400','#CF0000', '#90FB90', '#800000']
# plot the delta EF communities with same f
fig, ax, ind = bars(EF_data, keys, col = cols[:2])
# add the deltaEF of the different f communities
fig, ax, ind = bars(EF_data_dif, keys, fig, ax, col = cols[2:])
# adjust axis
ax.set_xlim([-0.2,ind[-1]+0.7])
ax.set_xticks(ind+0.15)
ax.set_xticklabels(keys)
ax.set_xlabel("Proportion p of species present at both sites", fontsize = 16)
ax.set_ylim([-80,140]) # add enough space for the legend
# add legend
legend = {col: ax.bar(0,0,width = 0 ,color = 'white'
,edgecolor=col,linewidth=1.5) for col in cols}
lab = ['e<0, same f', 'e>0, same f', 'e<0, diff f', 'e>0, diff f']
ax.legend([legend[col] for col in cols],lab, loc = 'upper left')
# save figure
fig.savefig("Figure S5, diff f.pdf")
|
mit
| -6,361,027,837,631,320,000
| 32.717949
| 79
| 0.644867
| false
| 2.803838
| false
| false
| false
|
autosportlabs/RaceCapture_App
|
autosportlabs/racecapture/views/dashboard/widgets/digitalgauge.py
|
1
|
2538
|
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.10.0')
from kivy.uix.boxlayout import BoxLayout
from kivy.app import Builder
from kivy.metrics import dp
from utils import kvFind, kvquery
from fieldlabel import AutoShrinkFieldLabel
from kivy.properties import NumericProperty, ObjectProperty
from autosportlabs.racecapture.views.dashboard.widgets.gauge import CustomizableGauge
DEFAULT_BACKGROUND_COLOR = [0, 0, 0, 0]
class DigitalGauge(CustomizableGauge):
Builder.load_string("""
<DigitalGauge>:
anchor_x: 'center'
anchor_y: 'center'
title_size: self.height * 0.5
value_size: self.height * 0.7
BoxLayout:
orientation: 'horizontal'
spacing: self.height * 0.1
AutoShrinkFieldLabel:
id: title
text: 'channel'
font_size: root.title_size
halign: 'right'
AutoShrinkFieldLabel:
canvas.before:
Color:
rgba: root.alert_background_color
Rectangle:
pos: self.pos
size: self.size
id: value
text: '---'
font_size: root.value_size
halign: 'center'
""")
alert_background_color = ObjectProperty(DEFAULT_BACKGROUND_COLOR)
def __init__(self, **kwargs):
super(DigitalGauge, self).__init__(**kwargs)
self.normal_color = DEFAULT_BACKGROUND_COLOR
def update_title(self, channel, channel_meta):
try:
self.title = channel if channel else ''
except Exception as e:
print('Failed to update digital gauge title ' + str(e))
def update_colors(self):
alert_color = self.select_alert_color()
self.alert_background_color = DEFAULT_BACKGROUND_COLOR if alert_color is None else alert_color
|
gpl-3.0
| 1,654,545,606,196,182,300
| 32.394737
| 102
| 0.657998
| false
| 4.009479
| false
| false
| false
|
jehine-MSFT/azure-storage-python
|
azure/storage/sharedaccesssignature.py
|
1
|
35228
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from datetime import date
from ._common_conversion import (
_sign_string,
_to_str,
)
from ._serialization import (
url_quote,
_to_utc_datetime,
)
from ._constants import X_MS_VERSION
class SharedAccessSignature(object):
'''
Provides a factory for creating blob, queue, table, and file shares access
signature tokens with a common account name and account key. Users can either
use the factory or can construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
The access key used to generate the shared access signatures.
'''
self.account_name = account_name
self.account_key = account_key
def generate_table(self, table_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None,
start_pk=None, start_rk=None,
end_pk=None, end_rk=None):
'''
Generates a shared access signature for the table.
Use the returned signature with the sas_token parameter of TableService.
:param str table_name:
Name of table.
:param TablePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str start_pk:
The minimum partition key accessible with this shared access
signature. startpk must accompany startrk. Key values are inclusive.
If omitted, there is no lower bound on the table entities that can
be accessed.
:param str start_rk:
The minimum row key accessible with this shared access signature.
startpk must accompany startrk. Key values are inclusive. If
omitted, there is no lower bound on the table entities that can be
accessed.
:param str end_pk:
The maximum partition key accessible with this shared access
signature. endpk must accompany endrk. Key values are inclusive. If
omitted, there is no upper bound on the table entities that can be
accessed.
:param str end_rk:
The maximum row key accessible with this shared access signature.
endpk must accompany endrk. Key values are inclusive. If omitted,
there is no upper bound on the table entities that can be accessed.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk)
sas.add_resource_signature(self.account_name, self.account_key, 'table', table_name)
return sas.get_token()
def generate_queue(self, queue_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the queue.
Use the returned signature with the sas_token parameter of QueueService.
:param str queue_name:
Name of queue.
:param QueuePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, add, update, process.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name)
return sas.get_token()
def generate_blob(self, container_name, blob_name, permission=None,
expiry=None, start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = container_name + '/' + blob_name
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('b')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
return sas.get_token()
def generate_container(self, container_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the container.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param ContainerPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('c')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name)
return sas.get_token()
def generate_file(self, share_name, directory_name=None, file_name=None,
permission=None, expiry=None, start=None, id=None,
ip=None, protocol=None, cache_control=None,
content_disposition=None, content_encoding=None,
content_language=None, content_type=None):
'''
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = share_name
if directory_name is not None:
resource_path += '/' + _to_str(directory_name)
resource_path += '/' + _to_str(file_name)
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('f')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path)
return sas.get_token()
def generate_share(self, share_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the share.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param SharePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('s')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name)
return sas.get_token()
def generate_account(self, services, resource_types, permission, expiry, start=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_account(services, resource_types)
sas.add_account_signature(self.account_name, self.account_key)
return sas.get_token()
class _QueryStringConstants(object):
SIGNED_SIGNATURE = 'sig'
SIGNED_PERMISSION = 'sp'
SIGNED_START = 'st'
SIGNED_EXPIRY = 'se'
SIGNED_RESOURCE = 'sr'
SIGNED_IDENTIFIER = 'si'
SIGNED_IP = 'sip'
SIGNED_PROTOCOL = 'spr'
SIGNED_VERSION = 'sv'
SIGNED_CACHE_CONTROL = 'rscc'
SIGNED_CONTENT_DISPOSITION = 'rscd'
SIGNED_CONTENT_ENCODING = 'rsce'
SIGNED_CONTENT_LANGUAGE = 'rscl'
SIGNED_CONTENT_TYPE = 'rsct'
TABLE_NAME = 'tn'
START_PK = 'spk'
START_RK = 'srk'
END_PK = 'epk'
END_RK = 'erk'
SIGNED_RESOURCE_TYPES = 'srt'
SIGNED_SERVICES = 'ss'
class _SharedAccessHelper():
def __init__(self):
self.query_dict = {}
def _add_query(self, name, val):
if val:
self.query_dict[name] = _to_str(val)
def add_base(self, permission, expiry, start, ip, protocol):
if isinstance(start, date):
start = _to_utc_datetime(start)
if isinstance(expiry, date):
expiry = _to_utc_datetime(expiry)
self._add_query(_QueryStringConstants.SIGNED_START, start)
self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
self._add_query(_QueryStringConstants.SIGNED_IP, ip)
self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
self._add_query(_QueryStringConstants.SIGNED_VERSION, X_MS_VERSION)
def add_resource(self, resource):
self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
def add_id(self, id):
self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
def add_account(self, services, resource_types):
self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
def add_table_access_ranges(self, table_name, start_pk, start_rk,
end_pk, end_rk):
self._add_query(_QueryStringConstants.TABLE_NAME, table_name)
self._add_query(_QueryStringConstants.START_PK, start_pk)
self._add_query(_QueryStringConstants.START_RK, start_rk)
self._add_query(_QueryStringConstants.END_PK, end_pk)
self._add_query(_QueryStringConstants.END_RK, end_rk)
def add_override_response_headers(self, cache_control,
content_disposition,
content_encoding,
content_language,
content_type):
self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
def add_resource_signature(self, account_name, account_key, service, path):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource +
get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
if service == 'blob' or service == 'file':
string_to_sign += \
(get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
if service == 'table':
string_to_sign += \
(get_value_to_append(_QueryStringConstants.START_PK) +
get_value_to_append(_QueryStringConstants.START_RK) +
get_value_to_append(_QueryStringConstants.END_PK) +
get_value_to_append(_QueryStringConstants.END_RK))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
def add_account_signature(self, account_name, account_key):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
string_to_sign = \
(account_name + '\n' +
get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
def get_token(self):
return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
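# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The account
# name, base64 key, container and blob names below are made-up placeholders.
if __name__ == '__main__':
    from datetime import datetime, timedelta

    sas = SharedAccessSignature('myaccount', 'bXlrZXk=')  # key must be base64
    token = sas.generate_blob(
        container_name='mycontainer',
        blob_name='report.csv',
        permission='r',  # read-only
        expiry=datetime.utcnow() + timedelta(hours=1))
    # `token` is a URL query string (e.g. 'sp=r&se=...&sig=...') that is
    # appended to the blob URL to grant temporary access.
    print(token)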
license: apache-2.0 | hash: -6,093,724,331,806,510,000 | line_mean: 51.73503 | line_max: 113 | alpha_frac: 0.634105 | autogenerated: false | ratio: 4.803764 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: bswartz/manila | path: manila/api/v2/share_networks.py | copies: 1 | size: 14622
# Copyright 2014 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The shares api."""
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_utils import timeutils
import six
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import api_version_request as api_version
from manila.api.openstack import wsgi
from manila.api.views import share_networks as share_networks_views
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
from manila import policy
from manila import quota
from manila.share import rpcapi as share_rpcapi
RESOURCE_NAME = 'share_network'
RESOURCES_NAME = 'share_networks'
LOG = log.getLogger(__name__)
QUOTAS = quota.QUOTAS
class ShareNetworkController(wsgi.Controller):
"""The Share Network API controller for the OpenStack API."""
_view_builder_class = share_networks_views.ViewBuilder
def __init__(self):
super(ShareNetworkController, self).__init__()
self.share_rpcapi = share_rpcapi.ShareAPI()
def show(self, req, id):
"""Return data about the requested network info."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'show')
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def delete(self, req, id):
"""Delete specified share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'delete')
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
share_instances = (
db_api.share_instances_get_all_by_share_network(context, id)
)
if share_instances:
msg = _("Can not delete share network %(id)s, it has "
"%(len)s share(s).") % {'id': id,
'len': len(share_instances)}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
# NOTE(ameade): Do not allow deletion of share network used by share
# group
sg_count = db_api.count_share_groups_in_share_network(context, id)
if sg_count:
msg = _("Can not delete share network %(id)s, it has %(len)s "
"share group(s).") % {'id': id, 'len': sg_count}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
for share_server in share_network['share_servers']:
self.share_rpcapi.delete_share_server(context, share_server)
db_api.share_network_delete(context, id)
try:
reservations = QUOTAS.reserve(
context, project_id=share_network['project_id'],
share_networks=-1, user_id=share_network['user_id'])
except Exception:
LOG.exception("Failed to update usages deleting "
"share-network.")
else:
QUOTAS.commit(context, reservations,
project_id=share_network['project_id'],
user_id=share_network['user_id'])
return webob.Response(status_int=202)
def _get_share_networks(self, req, is_detail=True):
"""Returns a list of share networks."""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
if 'security_service_id' in search_opts:
networks = db_api.share_network_get_all_by_security_service(
context, search_opts['security_service_id'])
elif context.is_admin and 'project_id' in search_opts:
networks = db_api.share_network_get_all_by_project(
context, search_opts['project_id'])
elif context.is_admin and 'all_tenants' in search_opts:
networks = db_api.share_network_get_all(context)
else:
networks = db_api.share_network_get_all_by_project(
context,
context.project_id)
date_parsing_error_msg = '''%s is not in yyyy-mm-dd format.'''
if 'created_since' in search_opts:
try:
created_since = timeutils.parse_strtime(
search_opts['created_since'],
fmt="%Y-%m-%d")
except ValueError:
msg = date_parsing_error_msg % search_opts['created_since']
raise exc.HTTPBadRequest(explanation=msg)
networks = [network for network in networks
if network['created_at'] >= created_since]
if 'created_before' in search_opts:
try:
created_before = timeutils.parse_strtime(
search_opts['created_before'],
fmt="%Y-%m-%d")
except ValueError:
msg = date_parsing_error_msg % search_opts['created_before']
raise exc.HTTPBadRequest(explanation=msg)
networks = [network for network in networks
if network['created_at'] <= created_before]
opts_to_remove = [
'all_tenants',
'created_since',
'created_before',
'limit',
'offset',
'security_service_id',
'project_id'
]
for opt in opts_to_remove:
search_opts.pop(opt, None)
if search_opts:
for key, value in search_opts.items():
if key in ['ip_version', 'segmentation_id']:
value = int(value)
if (req.api_version_request >=
api_version.APIVersionRequest("2.36")):
networks = [network for network in networks
if network.get(key) == value or
(value in network.get(key.rstrip('~'))
if key.endswith('~') and
network.get(key.rstrip('~')) else ())]
else:
networks = [network for network in networks
if network.get(key) == value]
limited_list = common.limited(networks, req)
return self._view_builder.build_share_networks(
req, limited_list, is_detail)
def index(self, req):
"""Returns a summary list of share networks."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'index')
return self._get_share_networks(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of share networks."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'detail')
return self._get_share_networks(req)
def update(self, req, id, body):
"""Update specified share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'update')
if not body or RESOURCE_NAME not in body:
raise exc.HTTPUnprocessableEntity()
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
update_values = body[RESOURCE_NAME]
if 'nova_net_id' in update_values:
msg = _("nova networking is not supported starting in Ocata.")
raise exc.HTTPBadRequest(explanation=msg)
if share_network['share_servers']:
for value in update_values:
if value not in ['name', 'description']:
msg = (_("Cannot update share network %s. It is used by "
"share servers. Only 'name' and 'description' "
"fields are available for update") %
share_network['id'])
raise exc.HTTPForbidden(explanation=msg)
try:
share_network = db_api.share_network_update(context,
id,
update_values)
except db_exception.DBError:
msg = "Could not save supplied data due to database error"
raise exc.HTTPBadRequest(explanation=msg)
return self._view_builder.build_share_network(req, share_network)
def create(self, req, body):
"""Creates a new share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'create')
if not body or RESOURCE_NAME not in body:
raise exc.HTTPUnprocessableEntity()
values = body[RESOURCE_NAME]
values['project_id'] = context.project_id
values['user_id'] = context.user_id
if 'nova_net_id' in values:
msg = _("nova networking is not supported starting in Ocata.")
raise exc.HTTPBadRequest(explanation=msg)
try:
reservations = QUOTAS.reserve(context, share_networks=1)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'share_networks' in overs:
LOG.warning("Quota exceeded for %(s_pid)s, "
"tried to create "
"share-network (%(d_consumed)d of %(d_quota)d "
"already consumed).", {
's_pid': context.project_id,
'd_consumed': _consumed('share_networks'),
'd_quota': quotas['share_networks']})
raise exception.ShareNetworksLimitExceeded(
allowed=quotas['share_networks'])
else:
try:
share_network = db_api.share_network_create(context, values)
except db_exception.DBError:
msg = "Could not save supplied data due to database error"
raise exc.HTTPBadRequest(explanation=msg)
QUOTAS.commit(context, reservations)
return self._view_builder.build_share_network(req, share_network)
def action(self, req, id, body):
_actions = {
'add_security_service': self._add_security_service,
'remove_security_service': self._remove_security_service
}
for action, data in body.items():
try:
return _actions[action](req, id, data)
except KeyError:
msg = _("Share networks does not have %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
def _add_security_service(self, req, id, data):
"""Associate share network with a given security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'add_security_service')
share_network = db_api.share_network_get(context, id)
if share_network['share_servers']:
msg = _("Cannot add security services. Share network is used.")
raise exc.HTTPForbidden(explanation=msg)
security_service = db_api.security_service_get(
context, data['security_service_id'])
for attached_service in share_network['security_services']:
if attached_service['type'] == security_service['type']:
msg = _("Cannot add security service to share network. "
"Security service with '%(ss_type)s' type already "
"added to '%(sn_id)s' share network") % {
'ss_type': security_service['type'],
'sn_id': share_network['id']}
raise exc.HTTPConflict(explanation=msg)
try:
share_network = db_api.share_network_add_security_service(
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
except exception.ShareNetworkSecurityServiceAssociationError as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def _remove_security_service(self, req, id, data):
"""Dissociate share network from a given security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'remove_security_service')
share_network = db_api.share_network_get(context, id)
if share_network['share_servers']:
msg = _("Cannot remove security services. Share network is used.")
raise exc.HTTPForbidden(explanation=msg)
try:
share_network = db_api.share_network_remove_security_service(
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
except exception.ShareNetworkSecurityServiceDissociationError as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def create_resource():
return wsgi.Resource(ShareNetworkController())
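# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the filtering rule
# applied in _get_share_networks() for API versions >= 2.36: a query key that
# ends in '~' (e.g. 'name~') is treated as an inexact substring match, while
# plain keys require exact equality. The helper and sample data are made up.
def _matches(network, key, value):
    if network.get(key) == value:
        return True
    if key.endswith('~') and network.get(key.rstrip('~')):
        return value in network.get(key.rstrip('~'))
    return False

# _matches({'name': 'prod-net-1'}, 'name~', 'prod')  -> True  (substring)
# _matches({'name': 'prod-net-1'}, 'name', 'prod')   -> False (exact only)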
license: apache-2.0 | hash: -7,893,771,970,282,250,000 | line_mean: 41.382609 | line_max: 78 | alpha_frac: 0.576323 | autogenerated: false | ratio: 4.401565 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: tsvstar/vk_downloader | path: pytube/api.py | copies: 1 | size: 16387
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from models import Video
from utils import safe_filename
from utils import MultipleObjectsReturned, YouTubeError, CipherError
try:
from urllib2 import urlopen
from urlparse import urlparse, parse_qs, unquote
except ImportError:
from urllib.parse import urlparse, parse_qs, unquote
from urllib.request import urlopen
import re
import json
YT_BASE_URL = 'http://www.youtube.com/get_video_info'
# YouTube quality and codecs id map.
# source: http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs
YT_ENCODING = {
# Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
# 3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "NA", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
# MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],
# WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
# The keys corresponding to the quality/codec map above.
YT_ENCODING_KEYS = (
'extension',
'resolution',
'video_codec',
'profile',
'video_bitrate',
'audio_codec',
'audio_bitrate'
)
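# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): _extract_fmt() below
# pairs YT_ENCODING_KEYS with the attribute list stored for an itag, e.g.:
#
#   dict(zip(YT_ENCODING_KEYS, YT_ENCODING[22]))
#   # -> {'extension': 'mp4', 'resolution': '720p', 'video_codec': 'H.264',
#   #     'profile': 'High', 'video_bitrate': '2-2.9', 'audio_codec': 'AAC',
#   #     'audio_bitrate': '192'}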
class BaseProvider(object):
# TODO: just cause you CAN do this, doesn't mean you should. `hasattr` is
# much cleaner.
_filename = None
_fmt_values = []
_video_url = None
_js_code = False
_precompiled = False
title = None
videos = []
# fmt was an undocumented URL parameter that allowed selecting
# YouTube quality mode without using player user interface.
@property
def url(self):
return self._video_url
@url.setter
def url(self, url):
self._video_url = url
self._filename = None
self._get_video_info()
@property
def filename(self):
if not self._filename:
self._filename = safe_filename(self.title)
return self._filename
@filename.setter
def filename(self, filename):
self._filename = filename
if self.videos:
for video in self.videos:
video.filename = filename
def get(self, extension=None, resolution=None, profile="High"):
"""Return a single video given an extention and resolution.
:params extention: The desired file extention (e.g.: mp4).
:params resolution: The desired video broadcasting standard.
:params profile: The desired quality profile.
"""
result = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
elif profile and v.profile != profile:
continue
else:
result.append(v)
if not len(result):
return
elif len(result) == 1:
return result[0]
else:
raise MultipleObjectsReturned(
"get() returned more than one object")
def filter(self, extension=None, resolution=None):
"""Return a filtered list of videos given an extention and resolution
criteria.
:params extention: The desired file extention (e.g.: mp4).
:params resolution: The desired video broadcasting standard.
"""
results = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
else:
results.append(v)
return results
"""
================================================
YOUTUBE SERVICE
================================================
"""
class YouTube(BaseProvider):
@property
def video_id(self):
"""Gets the video ID extracted from the URL.
"""
parts = urlparse(self._video_url)
qs = getattr(parts, 'query', None)
if qs:
video_id = parse_qs(qs).get('v', None)
if video_id:
return video_id.pop()
def _fetch(self, path, data):
"""Given a path, traverse the response for the desired data. (A
modified ver. of my dictionary traverse method:
https://gist.github.com/2009119)
:params path: A tuple representing a path to a node within a tree.
:params data: The data containing the tree.
"""
elem = path[0]
# Get first element in tuple, and check if it contains a list.
if type(data) is list:
# Pop it, and let's continue..
return self._fetch(path, data.pop())
# Parse the url encoded data
data = parse_qs(data)
# Get the element in our path
data = data.get(elem, None)
# Offset the tuple by 1.
path = path[1::1]
# Check if the path has reached the end OR the element return
# nothing.
if len(path) == 0 or data is None:
if type(data) is list and len(data) == 1:
data = data.pop()
return data
else:
# Nope, let's keep diggin'
return self._fetch(path, data)
def _parse_stream_map(self, text):
"""Python's `parse_qs` can't properly decode the stream map
containing video data so we use this instead.
"""
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"s": [],
"type": []
}
# Split individual videos
videos = text.split(",")
# Unquote the characters and split to parameters
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
value = value.encode('ascii')  # @tsv hack - unquote fails to parse unicode
videoinfo.get(key, []).append(unquote(value))
return videoinfo
def _get_video_info(self):
"""This is responsable for executing the request, extracting the
necessary details, and populating the different video resolutions and
formats into a list.
"""
# TODO: split up into smaller functions. Cyclomatic complexity => 15
self.title = None
self.videos = []
url = self.url.replace('feature=player_embedded&','')
response = urlopen(url)
if response:
content = response.read().decode("utf-8")
try:
player_conf = content[18 + content.find("ytplayer.config = "):]
bracket_count = 0
for i, char in enumerate(player_conf):
if char == "{":
bracket_count += 1
elif char == "}":
bracket_count -= 1
if bracket_count == 0:
break
else:
raise YouTubeError("Cannot get JSON from HTML")
index = i + 1
data = json.loads(player_conf[:index])
except Exception as e:
raise YouTubeError("Cannot decode JSON: {0}".format(e))
is_vevo = False
if data['args'].get('ptk', '') in ['vevo', 'dashmpd']:
# Vevo videos with encrypted signatures
is_vevo = True
stream_map = self._parse_stream_map(
data["args"]["url_encoded_fmt_stream_map"])
self.title = data["args"]["title"]
js_url = "http:" + data["assets"]["js"]
video_urls = stream_map["url"]
for i, url in enumerate(video_urls):
try:
fmt, fmt_data = self._extract_fmt(url)
##print fmt_data
except (TypeError, KeyError):
continue
# If the signature must be ciphered...
###print "ptk=%s"%data['args'].get('ptk',''), is_vevo, (url.split('signature=')+[''])[1].split('&')[0]
if "signature=" not in url:
if is_vevo:
has_decrypted_signature = False
try:
signature = self._decrypt_signature(
stream_map['s'][0])
url += '&signature=' + signature
has_decrypted_signature = True
except TypeError:
pass
if not has_decrypted_signature:
raise CipherError(
"Couldn't cipher the vevo signature. "
"Maybe YouTube has changed the cipher "
"algorithm.")
else:
signature = self._cipher2(stream_map["s"][i], js_url)
url = "%s&signature=%s" % (url, signature)
self.videos.append(Video(url, self.filename, **fmt_data))
self._fmt_values.append(fmt)
self.videos.sort()
@staticmethod
def _decrypt_signature(s):
"""Comment me :)
"""
def tu(a, b):
c = a[0]
a[0] = a[b % len(a)]
a[b] = c
return a
def splice(a, b):
return a[b:]
a = list(s)
a = tu(a[::-1], 26)
a = tu(a[::-1], 28)
a = tu(a, 38)
a = splice(a[::-1], 3)
return "".join(a)
def _cipher(self, s, url):
"""
(OBSOLETE DECIPHERING FUNCTION - use _cipher2 instead)
Get the signature using the cipher implemented in the JavaScript code
:params s: Signature
:params url: url of JavaScript file
"""
import tinyjs
# Getting JS code (if hasn't downloaded yet)
if not self._js_code:
self._js_code = (urlopen(url).read().decode()
if not self._js_code else self._js_code)
try:
# Find first function
regexp = r'function \w{2}\(\w{1}\)\{\w{1}=\w{1}\.split\(\"\"\)' \
'\;(.*)\}'
code = re.findall(regexp, self._js_code)[0]
code = code[:code.index("}")]
# pre-code
signature = "a='" + s + "'"
# Tiny JavaScript VM
jsvm = tinyjs.JSVM()
# Precompiling with the super JavaScript VM (if hasn't compiled yet)
if not self._precompiled:
self._precompiled = jsvm.compile(code)
# Make first function + pre-code
jsvm.setPreinterpreted(jsvm.compile(signature) + self._precompiled)
# Executing the JS code
return jsvm.run()["return"]
except Exception as e:
##raise
raise CipherError("Couldn't cipher the signature. Maybe YouTube "
"has changed the cipher algorithm(%s)" % e)
def _re_search( self, pattern, s):
"""auxilary function - find first answer for pattern"""
mobj = re.search( pattern, s)
return next(g for g in mobj.groups() if g is not None)
def _parse_sig_js(self, jscode):
"""auxilary function - prepare javascript decryption function"""
import pytube.jsinterp
# found required function from by pattern
funcname = self._re_search(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode )
# prepare function body to run
jsi = pytube.jsinterp.JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _cipher2(self, s, url):
"""(ACTUAL DECIPHERING FUNCTION)
s - signature
url - url of decryptor javascript
"""
# load JS if needed
if not self._js_code:
self._js_code = (urlopen(url).read().decode()
if not self._js_code else self._js_code)
try:
# get decryptor function
dec_func = self._parse_sig_js(self._js_code)
# run decryptor function
return dec_func(s)
except Exception as e:
##raise
raise CipherError("Couldn't cipher the signature. Maybe YouTube "
"has changed the cipher algorithm(%s)" % e)
def _extract_fmt(self, text):
"""YouTube does not pass you a completely valid URLencoded form, I
suspect this is suppose to act as a deterrent.. Nothing some regulular
expressions couldn't handle.
:params text: The malformed data contained within each url node.
"""
itag = re.findall('itag=(\d+)', text)
if itag and len(itag) == 1:
itag = int(itag[0])
attr = YT_ENCODING.get(itag, None)
if not attr:
return itag, None
return itag, dict(zip(YT_ENCODING_KEYS, attr))
"""
================================================
VIMEO SERVICE
================================================
"""
VIMEO_ENCODING = {
'mobile': ["mp4", "270p", "H.264", "Base", "0.3", "AAC", "112"], # '480x270 H.264/AAC Stereo MP4',
'sd': ["mp4", "360p", "H.264", "High", "0.7", "AAC", "112"], # '640x360 H.264/AAC Stereo MP4',
'hd': ["mp4", "720p", "H.264", "High", "1.5-1.6", "AAC", "160"], #'1280x720 H.264/AAC Stereo MP4',
}
def _download(url):
response = urlopen(url)
if response:
return response.read().decode("utf-8")
return ''
class Vimeo(BaseProvider):
def _get_video_info(self):
"""This is responsable for executing the request, extracting the
necessary details, and populating the different video resolutions and
formats into a list.
"""
self.title = None
self.videos = []
html = _download(self.url)
match = re.search('content="?(https?://player.vimeo.com/video/([0-9]+))">',html)
if match:
_id = match.group(1)
self.title = "vimeo%s" % match.group(2)
else:
raise YouTubeError("Fail to parse vimeo page: %s"%self.url)
match = re.search('<meta name="description" content="([^"]+)',html)
if match:
self.title = match.group(1)
html = _download("%s/config"%_id)
try:
j = json.loads( html )
files = j['request']['files'].get('h264',{})
for t, fmap in files.items():
fmt = list( VIMEO_ENCODING['sd'] )
if t in VIMEO_ENCODING:
fmt = VIMEO_ENCODING[t]
fmt_data = dict(zip(YT_ENCODING_KEYS, fmt))
if 'height' in fmap:
fmt_data['resolution'] = str(fmap['height']) + 'p'
if 'bitrate' in fmap:
fmt_data['video_bitrate'] = "%0.1f" % (fmap['bitrate']/1000)
self.videos.append( Video(fmap['url'], self.filename, **fmt_data) )
except Exception as e:
raise YouTubeError("Cannot decode JSON: {0}".format(e))
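# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The URL is a
# placeholder, a real run needs network access, and Video.download() is the
# helper assumed to exist on models.Video.
if __name__ == '__main__':
    yt = YouTube()
    yt.url = 'http://www.youtube.com/watch?v=XXXXXXXXXXX'  # setter fetches video info
    video = yt.get('mp4', '720p')  # single Video object, or None if absent
    if video:
        video.download()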
license: mit | hash: 2,981,604,777,545,081,300 | line_mean: 33.71822 | line_max: 118 | alpha_frac: 0.505279 | autogenerated: false | ratio: 3.883175 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: Southpaw-TACTIC/TACTIC | path: src/pyasm/common/js_wrapper.py | copies: 1 | size: 5678
###########################################################
#
# Copyright (c) 2015, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['JsWrapper']
import tacticenv
from pyasm.common import Environment, jsonloads, jsondumps, Container
from tactic_client_lib import TacticServerStub
try:
import PyV8
HAS_PYV8 = True
except:
HAS_PYV8 = False
class PyV8:
class JSClass:
pass
def has_pyv8():
return HAS_PYV8
# Replace console.log
class MyConsole(PyV8.JSClass):
def log(self, *args):
args2 = []
for arg in args:
arg = PyV8.convert(arg)
args2.append(arg)
print(" ".join([str(x) for x in args2]))
class ApiDelegator(PyV8.JSClass):
def execute(self, func_name, args=[], kwargs={}):
server = TacticServerStub.get()
if args:
args = jsonloads(args)
if kwargs:
kwargs = jsonloads(kwargs)
if kwargs:
# Quirk ... when there is a kwargs, the last args is the kwargs
if args:
args.pop()
call = "server.%s(*args, **kwargs)" % func_name
else:
call = "server.%s(*args)" % func_name
try:
ret_val = eval(call)
except Exception as e:
print("ERROR: ", e)
raise
ret_val = jsondumps(ret_val)
return ret_val
class JSFile(object):
def copy(self, src, dst):
print("src: ", src)
print("dst: ", dst)
def move(self, src, dst):
pass
class GlobalContext(PyV8.JSClass):
console = MyConsole()
spt_delegator = ApiDelegator()
spt_file = JSFile()
class JsWrapper(object):
def __init__(self):
if HAS_PYV8:
with PyV8.JSLocker():
self.ctx = PyV8.JSContext(GlobalContext())
self.ctx.enter()
self.init()
self.ctx.leave()
def get():
key = "JsWrapper"
wrapper = Container.get(key)
if wrapper == None:
wrapper = JsWrapper()
Container.put(key, wrapper)
return wrapper
get = staticmethod(get)
def set_value(self, name, value):
self.ctx.locals[name] = value
def execute(self, js, kwargs={}):
if HAS_PYV8:
with PyV8.JSLocker():
self.ctx.enter()
try:
for name, value in kwargs.items():
self.ctx.locals[name] = value
data = self.ctx.eval(js)
finally:
self.ctx.leave()
return data
def execute_func(self, js, kwargs={}):
js = '''
var func = function() {
%s
}
var ret_val = func();
ret_val = JSON.stringify(ret_val);
''' % js
ret_val = self.execute(js, kwargs)
ret_val = jsonloads(ret_val)
return ret_val
def init(self):
install_dir = Environment.get_install_dir()
# initialize
js = '''
<!-- TACTIC -->
// Fixes
var spt = {};
spt.browser = {};
spt.browser.is_IE = function() { return false; }
spt.error = function(error) {
throw(error);
}
'''
self.ctx.eval(js)
sources = [
"environment.js",
"client_api.js"
]
for source in sources:
#path = "tactic/%s" % source
path = "%s/src/context/spt_js/%s" % (install_dir, source)
js = open(path).read()
self.ctx.eval(js)
js = '''
spt._delegate = function(func_name, args, kwargs) {
// convert everything to json
var args2 = [];
for (var i in args) {
args2.push(args[i]);
}
if (typeof(kwargs) == "undefined") {
kwargs = {};
}
args2 = JSON.stringify(args2);
kwargs = JSON.stringify(kwargs);
var ret_val = spt_delegator.execute(func_name, args2, kwargs);
ret_val = JSON.parse(ret_val);
return ret_val;
}
var server = TacticServerStub.get();
'''
self.ctx.eval(js)
def test():
# TEST
cmd = JsWrapper.get()
import time
start = time.time()
js = '''
console.log(server.ping() );
console.log("---");
var result = server.eval("@SOBJECT(sthpw/file)");
for (var i in result) {
var item = result[i];
if ( i > 5 ) break;
console.log(item.code);
}
'''
cmd.execute(js)
print(time.time() - start)
js = '''
console.log("---");
var result = server.get_by_search_key(result[0].__search_key__);
console.log(result.code);
'''
cmd.execute(js)
js = '''
console.log("---");
var result = server.eval("@SOBJECT(sthpw/file)", {single: true});
console.log(result.code);
'''
cmd.execute(js)
print("---")
js = '''
return ['stream1','stream2'];
''';
ret_val = cmd.execute_func(js)
print("ret_val: ", ret_val)
print("---")
js = '''
spt_file.copy("tactic.png", "tactic2.png");
''';
cmd.execute_func(js)
print("---")
kwargs = {
'a': 123,
'b': 234,
'c': "This isn't it"
}
js = '''
spt_file.copy("tactic.png", "tactic2.png");
''';
cmd.execute_func(js, kwargs)
if __name__ == '__main__':
from pyasm.security import Batch
Batch()
test()
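# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): execute_func() wraps
# the supplied JavaScript in a function, injects each kwargs entry into the
# PyV8 context as a JS variable, and JSON-round-trips the return value, e.g.:
#
#   wrapper = JsWrapper.get()
#   total = wrapper.execute_func("return a + b;", {'a': 1, 'b': 2})
#   # total == 3  (requires PyV8 to be importable)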
license: epl-1.0 | hash: 5,923,317,514,424,031,000 | line_mean: 18.312925 | line_max: 75 | alpha_frac: 0.499648 | autogenerated: false | ratio: 3.623484 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: gecos-team/gecosws-config-assistant | path: gecosws_config_assistant/view/LogTerminalDialog.py | copies: 1 | size: 4670
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Francisco Fuentes Barrera <ffuentes@solutia-it.es>"
__copyright__ = "Copyright (C) 2015, Junta de Andalucía" + \
"<devmaster@guadalinex.org>"
__license__ = "GPL-2"
import logging
import gettext
from gettext import gettext as _
import fcntl
import os
from subprocess import Popen, PIPE
from gi.repository import Gtk, Pango, GObject
from gecosws_config_assistant.view.GladeWindow import GladeWindow
gettext.textdomain('gecosws-config-assistant')
class LogTerminalDialog(GladeWindow):
'''
Dialog class that shows the system status.
'''
def __init__(self, controller, parent):
'''
Constructor
'''
self.parent = parent
self.controller = controller
self.logger = logging.getLogger('LogTerminalDialog')
self.gladepath = 'logterminal.glade'
self.data = None
self.initUI()
def get_data(self):
''' Getter data '''
return self.__data
def set_data(self, value):
''' Setter data '''
self.__data = value
def initUI(self):
''' Initialize UI '''
self.buildUI(self.gladepath)
self.logger.debug('UI initiated')
def initTerminal(self):
''' Initialize terminal '''
self.sub_proc = Popen(
"tail -n 200 -f /tmp/gecos-config-assistant.log",
shell=True,
stdout=PIPE)
self.sub_outp = ""
def non_block_read(self, output):
''' Non block read '''
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read()
except:
return ''
def update_terminal(self):
''' Update terminal '''
self.textBuffer.insert_at_cursor(
self.non_block_read(self.sub_proc.stdout))
return self.sub_proc.poll() is None
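    # NOTE (added comment): update_terminal() above is driven by the
    # GObject.timeout_add(100, ...) call in show() below; returning True while
    # the tail subprocess is still running keeps the 100 ms timer firing, and
    # returning False once tail exits stops the polling loop.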
def extractGUIElements(self):
''' Extract GUI elements '''
self.window = self.getElementById('window1')
self.acceptButton = self.getElementById('button1')
self.statusText = self.getElementById('textview1')
self.statusText.set_editable(False)
self.statusText.set_cursor_visible(False)
self.statusText.set_justification(Gtk.Justification.LEFT)
self.statusText.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
self.textBuffer = self.statusText.get_buffer()
self.dialog = self.window
def modifyFont(self):
''' Modifying font '''
fontdesc = Pango.FontDescription("monospace")
self.statusText.modify_font(fontdesc)
def addHandlers(self):
''' Adding handlers '''
self.handlers = self.parent.get_common_handlers()
# add new handlers here
self.logger.debug("Adding back handler")
self.handlers["onBack"] = self.goBack
def show(self):
''' Show '''
self.logger.debug("Show")
self.extractGUIElements()
self.modifyFont()
self.initTerminal()
self.window.set_title(_('Log terminal'))
self.window.set_modal(True)
self.window.set_transient_for(self.parent.window)
GObject.timeout_add(100, self.update_terminal)
self.window.show_all()
x, y = self.parent.window.get_position()
w, h = self.parent.window.get_size()
sw, sh = self.window.get_size()
self.logger.debug('x={} y={} w={} h={} sw={} sh={}'.format(
x, y, w, h, sw, sh))
self.window.move(x + w/2 - sw/2, y + h/2 - sh/2)
while Gtk.events_pending():
Gtk.main_iteration()
def goBack(self, *args):
''' Go back '''
self.logger.debug("Go back")
self.dialog.destroy()
data = property(
get_data,
set_data,
None,
None)
|
gpl-2.0
| -4,645,901,874,535,072,000
| 27.469512
| 74
| 0.615335
| false
| 3.802117
| false
| false
| false
|
danbradham/nodify
|
nodify/view.py
|
1
|
3580
|
'''
view
====
Defines a view class for maintaining a graphics scene.
'''
import math
from PySide import QtCore, QtGui
class View(QtGui.QGraphicsView):
'''A View supporting smooth panning and zooming. Use Alt+Left Mouse to
pan and Alt+Middle or Right Mouse to zoom. Dragging without Alt drags out
a selection marquee.
.. seealso::
Documentation for :class:`QtGui.QGraphicsView`'''
def __init__(self, *args, **kwargs):
super(View, self).__init__(*args, **kwargs)
self.setTransformationAnchor(QtGui.QGraphicsView.NoAnchor)
self.setResizeAnchor(QtGui.QGraphicsView.NoAnchor)
self.setRubberBandSelectionMode(QtCore.Qt.IntersectsItemShape)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setRenderHint(QtGui.QPainter.Antialiasing)
# Set a massive canvas for seemingly unlimited pan and zoom
self.setSceneRect(0, 0, 32000, 32000)
self.centerOn(16000, 16000)
self._last_pos = QtCore.QPoint(0, 0)
self._drag_mod = QtCore.Qt.AltModifier
self._drag_buttons = [QtCore.Qt.LeftButton]
self._pan_buttons = [QtCore.Qt.LeftButton]
self._zoom_buttons = [QtCore.Qt.MiddleButton, QtCore.Qt.RightButton]
self._rel_scale = 1
def mousePressEvent(self, event):
'''Overloaded to support both marquee dragging and pan/zoom. Here we
        set up the dragging mode and store the anchor position.'''
m = event.modifiers()
b = event.buttons()
        if m == self._drag_mod or b not in self._drag_buttons:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
self._last_pos = self._anchor_pos = event.pos()
super(View, self).mousePressEvent(event)
def zoom(self, factor):
'''Zoom the view.
:param factor: Amount to scale'''
rel_scale = self._rel_scale * factor
if rel_scale < 0.2 or rel_scale > 8:
return
self._rel_scale = rel_scale
transform = self.transform()
transform.scale(factor, factor)
self.setTransform(transform)
def pan(self, x, y):
'''Pan the view.
:param x: Number of pixels in x
:param y: Number of pixels in y'''
self.translate(-x, -y)
def mouseMoveEvent(self, event):
if not event.modifiers() == QtCore.Qt.AltModifier:
super(View, self).mouseMoveEvent(event)
return
b = event.buttons()
pos = event.pos()
delta = pos - self._last_pos
if b in self._pan_buttons:
delta /= self.transform().m11()
self.pan(-delta.x(), -delta.y())
elif b in self._zoom_buttons:
old_pos = self.mapToScene(self._anchor_pos)
step = 0.02 * max(math.sqrt(delta.x() ** 2 + delta.y() ** 2), 1.0)
if delta.x() < 0 or -delta.y() < 0:
step *= -1
factor = 1 + step
self.zoom(factor) # Zoom
delta = self.mapToScene(self._anchor_pos) - old_pos
self.pan(-delta.x(), -delta.y()) # Pan to center on mouse pivot
self._last_pos = pos
def mouseReleaseEvent(self, event):
if event.modifiers() == self._drag_mod:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
super(View, self).mouseReleaseEvent(event)
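# --- Added usage sketch (not part of the original module) ---
# A minimal, hypothetical harness showing how the View above is typically
# wired to a QGraphicsScene; the scene contents here are assumptions made
# purely for illustration.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    scene = QtGui.QGraphicsScene()
    # Place an item near the point the view centers on in __init__ (16000, 16000).
    scene.addRect(16000, 16000, 120, 80)
    view = View(scene)
    view.show()
    sys.exit(app.exec_())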
|
mit
| 3,127,646,211,352,622,600
| 29.598291
| 78
| 0.609777
| false
| 3.690722
| false
| false
| false
|
gpoisoned/hars-app
|
server/server.py
|
1
|
3487
|
from flask import Flask, jsonify
import threading
import zmq
import time
import logging
from Queue import Queue
# Clear the Log file if it exists
with open("server.log", "w"):
pass
logging.basicConfig(filename='server.log',level=logging.DEBUG,\
format='%(levelname)s:%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
app = Flask(__name__)
context = zmq.Context()
servers = ['tcp://127.0.0.1:5558', 'tcp://127.0.0.1:5559']
servers_heartbeats = ['tcp://127.0.0.1:6668', 'tcp://127.0.0.1:6669']
server_nbr = 0
message_queue = Queue()
primary_router_msg = context.socket(zmq.PUB)
primary_router_msg.connect(servers[0])
backup_router_msg = context.socket(zmq.PUB)
backup_router_msg.connect(servers[1])
@app.route("/square/<int:num>")
def square(num):
message_queue.put(num)
return jsonify(status="Work will be sent to worker!")
@app.route("/")
def root():
return jsonify(status="Web server is running!")
@app.route("/health")
def health():
return jsonify(heath="It's all good :)")
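# NOTE (added comment): with Flask's default host/port assumed, the routes
# above behave roughly as follows (illustration only):
#   GET /            -> {"status": "Web server is running!"}
#   GET /health      -> {"heath": "It's all good :)"}
#   GET /square/7    -> queues 7 onto message_queue for a worker via ZeroMQ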
def message_sender():
global servers
global server_nbr
global context
global send_message
while True:
message = message_queue.get()
print message
if server_nbr == 0:
primary_router_msg.send("%s %s" %("DATA", message))
elif server_nbr == 1:
backup_router_msg.send("%s %s" %("DATA", message))
message_queue.task_done()
# Background thread to do heartbeat with router
def heartbeat_listener():
# We want to modify the global states server_nbr
# and use global zeromq context
global servers_heartbeats
global server_nbr
global context
    HEARTBEAT_TIMEOUT = 1000 * 5  # 5 seconds, expressed in milliseconds for poller.poll()
DELAY = 3000
router_heartbeat = context.socket(zmq.REQ)
router_heartbeat.connect(servers_heartbeats[server_nbr])
poller = zmq.Poller()
poller.register(router_heartbeat, zmq.POLLIN)
heartbeat = "HB"
while True:
try:
router_heartbeat.send(heartbeat,zmq.NOBLOCK)
expect_reply = True
except:
            expect_reply = False
while expect_reply:
socks = dict(poller.poll(HEARTBEAT_TIMEOUT))
if router_heartbeat in socks:
reply = router_heartbeat.recv(zmq.NOBLOCK)
expect_reply = False
else:
logging.warning("Router is probably dead. Connecting to backup router")
time.sleep(DELAY/1000)
# Unregister old socket and delete it
poller.unregister(router_heartbeat)
router_heartbeat.close()
# Change server and recreate sockets
server_nbr = (server_nbr + 1) % 2
router_heartbeat = context.socket(zmq.REQ)
poller.register(router_heartbeat, zmq.POLLIN)
# reconnect and resend request
router_heartbeat.connect(servers_heartbeats[server_nbr])
router_heartbeat.send(heartbeat,zmq.NOBLOCK)
if __name__ == "__main__":
app.debug = True
logging.info("Starting a heartbeat daemon process...")
listner = threading.Thread(name="Heartbeat_listener", target = heartbeat_listener).start()
sender = threading.Thread(name="Message sender", target = message_sender).start()
logging.info("**** Daemon started. Now running app server ****")
app.run(threaded=True)
logging.error("App server crashed.")
context.term()
|
apache-2.0
| 796,355,831,453,210,900
| 31.588785
| 94
| 0.629481
| false
| 3.806769
| false
| false
| false
|
jiasir/pycs
|
vulpo/resultset.py
|
1
|
6559
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from vulpo.scs.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
    to the client. It is a light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and popped onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
        self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
# set it here to remain backwards-compatibile.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
self.request_id = value
else:
setattr(self, name, value)
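# --- Added usage sketch (not part of the original module) ---
# A minimal, self-contained illustration of the marker_elem mechanism described
# in the ResultSet docstring: each ('ElementName', Class) pair makes the SAX
# handler push a new Class instance onto the list when that element is seen.
# The Item class below is hypothetical and exists only for this example.
if __name__ == '__main__':
    class Item(object):
        def __init__(self, connection=None):
            self.name = None
        def startElement(self, name, attrs, connection):
            return None
        def endElement(self, name, value, connection):
            setattr(self, name, value)
    rs = ResultSet([('Item', Item)])
    # Simulate the callbacks an XML response handler would normally drive.
    obj = rs.startElement('Item', {}, None)
    obj.endElement('name', 'example', None)
    rs.endElement('IsTruncated', 'false', None)
    print("%d item(s), is_truncated=%s" % (len(rs), rs.is_truncated))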
|
mit
| -787,891,861,120,344,300
| 36.695402
| 79
| 0.606495
| false
| 4.366844
| false
| false
| false
|
calisthenics/site
|
bin/wikipedia-bodyweight-exercise.py
|
1
|
4818
|
#!/usr/bin/env python
# coding: utf-8
import os
import re
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from logya.core import Logya
from logya.path import slugify, target_file
from logya.writer import encode_content, write
logya = Logya()
logya.init_env()
url = 'https://en.wikipedia.org/wiki/Bodyweight_exercise'
html = requests.get(url).text
soup = BeautifulSoup(html, 'lxml')
replacements = {
'bams': 'bam',
'bodybuilders': 'bodybuilder',
'boots': 'boot',
'chairs': 'chair',
'climbers': 'climber',
'crosses': 'cross',
'curls': 'curl',
'darlings': 'darling',
'dips': 'dip',
'dogs': 'dog',
'extensions': 'extension',
'humpers': 'humper',
'ins': 'in',
'kicks': 'kick',
'knives': 'knife',
'lifts': 'lift',
'little piggies': '3 little pigs',
'lunges': 'lunge',
'maybes': 'maybe',
'mikes': 'mike',
'mornings': 'morning',
'offs': 'off',
'plunges': 'plunge',
'push exercises': 'push',
'raises': 'raise',
'rotations': 'rotation',
'scissors': 'scissor',
'spidermans': 'spiderman',
'supermans': 'superman',
'swimmers': 'swimmer',
'squats': 'squat',
'ups': 'up'
}
resources = '## Resources\n\n* [Wikipedia: Bodyweight exercise]({})'.format(url)
def canonical_name(name):
name = name.strip().lower()
if name.startswith('full body'):
return ''
for source, target in replacements.items():
name = re.sub(r'\b{}\b'.format(source), target, name)
return name.title()
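# NOTE (added comment): the replacement table above normalizes plural/variant
# exercise names to a single canonical form before title-casing, e.g.
# (hypothetical inputs):
#   canonical_name('Squats')          -> 'Squat'
#   canonical_name('Push exercises')  -> 'Push'
#   canonical_name('Full body squat') -> ''   (full-body entries are skipped)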
def clean_text(text):
return text.replace('[citation needed]', '').strip()
# Only interested in TOC numbers 4 to 8.
tocnumbers = range(4, 9)
toc1_items = soup.find(id='toc').find_all(class_='toclevel-1')
groups = [i for i in toc1_items if int(i.find('a').find(class_='tocnumber').text) in tocnumbers]
assert len(groups) == len(tocnumbers)
# Assemble exercise documents
for group in groups:
group_name = group.find('a').find(class_='toctext').text.strip()
for item in group.find('ul').find_all('a'):
href = item.attrs['href']
heading = soup.find(id=href.lstrip('#')).parent
name = canonical_name(item.find(class_='toctext').text)
groups = [canonical_name(group_name)]
body = []
variants = []
muscles = []
for sibling in heading.find_next_siblings():
if sibling.name == 'p':
body.append(clean_text(sibling.text))
elif sibling.name == 'dl':
dth = sibling.find('dt').text.strip().lower()
if dth == 'common variants':
variants = list(filter(None, [canonical_name(i.text) for i in sibling.find_all('dd') if i.text != 'none']))
elif dth == 'muscle groups':
muscles = list(filter(None, [canonical_name(i.text) for i in sibling.find_all('dd')]))
elif sibling.name == 'h3':
break
if body:
body.append(resources)
doc = {
'created': datetime.now(),
'description': body[0].split('. ')[0] + '.',
'groups': groups,
'muscles': muscles,
'template': 'exercise.html',
'title': name,
'variants': variants
}
# Files shall be saved as md files, so calling write_content directly
# is not possible as it would save as html.
filename = target_file(logya.dir_content, '/exercise/{}.md'.format(slugify(name)))
if not os.path.exists(filename):
write(filename, encode_content(doc, '\n\n'.join(body)))
# Create stub files for variants
for variant in variants:
filename = target_file(logya.dir_content, '/exercise/{}.md'.format(slugify(variant)))
if not os.path.exists(filename):
ex_variants = list(set(variants).union(set([name])).difference(set([variant])))
doc = {
'created': datetime.now(),
'description': '',
'groups': groups,
'muscles': muscles,
'template': 'exercise.html',
'title': variant,
'variants': ex_variants
}
write(filename, encode_content(doc, ''))
# Create stub files for muscles
for muscle in muscles:
filename = target_file(logya.dir_content, '/muscle/{}.md'.format(slugify(muscle)))
if not os.path.exists(filename):
doc = {
'created': datetime.now(),
'description': '',
'template': 'muscle.html',
'title': muscle
}
write(filename, encode_content(doc, ''))
|
mit
| 3,966,870,147,894,004,000
| 31.126667
| 127
| 0.547945
| false
| 3.598208
| false
| false
| false
|
gdorion/advent-of-code
|
2015/python/Day14/race.py
|
1
|
1703
|
#!/bin/env python
#
# Adventofcode.com
#
# Author : Guillaume Dorion
# Email : gdorion@gmail.com
#
class Reindeer(object):
def __init__(self, name, speed, flightTime, restTime):
self.name = name
self.speed = speed
self.flightTime = flightTime
self.restTime = restTime
# Results
self.timeFlying = 0
self.distance = 0
self.points = 0
def takeAction(self, iteration, duration):
if self.isFlying(iteration, duration):
self.distance = self.distance + self.speed
return True
def isFlying(self, iteration, duration):
if (iteration % (self.flightTime + self.restTime)) < self.flightTime:
return True
return False
reindeers = []
dt = 2503
def findLeader(reindeers):
best = 0
for r in reindeers:
if r.distance >= best:
best = r.distance
for r in reindeers:
if r.distance == best:
r.points = r.points + 1
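# NOTE (added comment): findLeader() implements the per-second scoring - every
# reindeer tied for the longest distance at that second earns one point - so
# the loop below prints each reindeer's final distance and then the reindeer
# with the most points.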
with open('data.txt') as f:
for line in f:
l = line.rstrip('\n')
params = l.split('.')[0].split(' ')
# Set the speed all on a km/s base
reindeers.append(Reindeer(params[0], int(params[3]), int(params[6]), int(params[13])))
for second in range(dt):
for reindeer in reindeers:
reindeer.takeAction(second, dt)
findLeader(reindeers)
longest = 0
for reindeer in reindeers:
print reindeer.name + " " + str(reindeer.distance) + "(%s)" % (str(reindeer.points))
winner = reindeers[0]
for reindeer in reindeers:
if winner.points < reindeer.points:
winner = reindeer
print winner.name + ' ' + str(winner.distance) + ' ' + str(winner.points)
|
mit
| -1,761,703,601,739,938,600
| 23.328571
| 94
| 0.600117
| false
| 3.159555
| false
| false
| false
|
sahilshekhawat/ApkDecompiler
|
javadecompiler/Krakatau/java/ast2.py
|
1
|
3633
|
from . import ast
from .stringescape import escapeString as escape
class MethodDef(object):
def __init__(self, class_, flags, name, desc, retType, paramDecls, body):
self.flagstr = flags + ' ' if flags else ''
self.retType, self.paramDecls = retType, paramDecls
self.body = body
self.comment = None
self.triple = class_.name, name, desc
if name == '<clinit>':
self.isStaticInit, self.isConstructor = True, False
elif name == '<init>':
self.isStaticInit, self.isConstructor = False, True
self.clsname = ast.TypeName((class_.name, 0))
else:
self.isStaticInit, self.isConstructor = False, False
def print_(self, printer, print_):
argstr = ', '.join(print_(decl) for decl in self.paramDecls)
if self.isStaticInit:
header = 'static'
elif self.isConstructor:
name = print_(self.clsname).rpartition('.')[-1]
header = '{}{}({})'.format(self.flagstr, name, argstr)
else:
name = printer.methodName(*self.triple)
header = '{}{} {}({})'.format(self.flagstr, print_(self.retType), escape(name), argstr)
if self.comment:
header = '//{}\n{}'.format(self.comment, header)
if self.body is None:
return header + ';\n'
else:
return header + '\n' + print_(self.body)
class FieldDef(object):
def __init__(self, flags, type_, class_, name, desc, expr=None):
self.flagstr = flags + ' ' if flags else ''
self.type_ = type_
self.name = name
self.expr = None if expr is None else ast.makeCastExpr(type_.tt, expr)
self.triple = class_.name, name, desc
def print_(self, printer, print_):
name = escape(printer.fieldName(*self.triple))
if self.expr is not None:
return '{}{} {} = {};'.format(self.flagstr, print_(self.type_), name, print_(self.expr))
return '{}{} {};'.format(self.flagstr, print_(self.type_), name)
class ClassDef(object):
def __init__(self, flags, isInterface, name, superc, interfaces, fields, methods):
self.flagstr = flags + ' ' if flags else ''
self.isInterface = isInterface
self.name = ast.TypeName((name,0))
self.super = ast.TypeName((superc,0)) if superc is not None else None
self.interfaces = [ast.TypeName((iname,0)) for iname in interfaces]
self.fields = fields
self.methods = methods
if superc == 'java/lang/Object':
self.super = None
def print_(self, printer, print_):
contents = ''
if self.fields:
contents = '\n'.join(print_(x) for x in self.fields)
if self.methods:
if contents:
contents += '\n\n' #extra line to divide fields and methods
contents += '\n\n'.join(print_(x) for x in self.methods)
indented = [' '+line for line in contents.splitlines()]
name = print_(self.name).rpartition('.')[-1]
defname = 'interface' if self.isInterface else 'class'
header = '{}{} {}'.format(self.flagstr, defname, name)
if self.super:
header += ' extends ' + print_(self.super)
if self.interfaces:
if self.isInterface:
assert(self.super is None)
header += ' extends ' + ', '.join(print_(x) for x in self.interfaces)
else:
header += ' implements ' + ', '.join(print_(x) for x in self.interfaces)
lines = [header + ' {'] + indented + ['}']
return '\n'.join(lines)
|
gpl-2.0
| -7,945,223,551,241,829,000
| 39.831461
| 100
| 0.56262
| false
| 3.889722
| false
| false
| false
|
apieum/inxpect
|
inxpect/expect/property.py
|
1
|
1966
|
# -*- coding: utf8 -*-
from .chain import AndChain
from .operator import *
from .should import Should, ShouldNot
class DefaultProperty(object):
def __init__(self, getter=None, returns=AndChain):
self.should = Should(getter, returns)
self.should_not = ShouldNot(getter, returns)
def equal_to(self, expected, closure=None):
return self.should(Equal, expected, closure)
def not_equal_to(self, expected, closure=None):
return self.should_not(Equal, expected, closure)
def lower_than(self, expected, closure=None):
return self.should(LowerThan, expected, closure)
def lower_or_equal_than(self, expected, closure=None):
return self.should(LowerOrEqualThan, expected, closure)
def greater_than(self, expected, closure=None):
return self.should(GreaterThan, expected, closure)
def greater_or_equal_than(self, expected, closure=None):
return self.should(GreaterOrEqualThan, expected, closure)
def same_as(self, expected, closure=None):
return self.should(SameAs, expected, closure)
def not_same_as(self, expected, closure=None):
return self.should_not(SameAs, expected, closure)
def type_is(self, expected, closure=None):
return self.should(TypeIs, expected, closure)
def type_is_not(self, expected, closure=None):
return self.should_not(TypeIs, expected, closure)
def instance_of(self, expected, closure=None):
return self.should(InstanceOf, expected, closure)
def not_instance_of(self, expected, closure=None):
return self.should_not(InstanceOf, expected, closure)
@property
def len(self):
return DefaultProperty(getter=self.should.closure(len))
def __get__(self, instance, ownerCls):
return self
__eq__ = equal_to
__ne__ = not_equal_to
__lt__ = lower_than
__gt__ = greater_than
__le__ = lower_or_equal_than
__ge__ = greater_or_equal_than
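    # NOTE (added comment): the operator aliases above let a DefaultProperty be
    # used with Python's comparison syntax directly, e.g. `prop == 5` builds the
    # same expectation as `prop.equal_to(5)`.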
|
lgpl-3.0
| -9,122,398,435,168,640,000
| 31.229508
| 65
| 0.671414
| false
| 3.737643
| false
| false
| false
|
Jumpscale/core9
|
JumpScale9/clients/tarantool/templates/python/model.py
|
1
|
3184
|
from js9 import j
import os
import capnp
# import msgpack
import base64
ModelBaseCollection = j.data.capnp.getModelBaseClassCollection()
ModelBase = j.data.capnp.getModelBaseClass()
# from JumpScale9.clients.tarantool.KVSInterface import KVSTarantool
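# NOTE (added comment): $Name / $name below are template placeholders; this
# file is presumably rendered with the concrete model name substituted before
# it is imported, since `class $NameModel` is not valid Python as written.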
class $NameModel(ModelBase):
'''
'''
def __init__(self):
ModelBase.__init__(self)
def index(self):
#no need to put indexes because will be done by capnp
pass
def save(self):
self.reSerialize()
self._pre_save()
buff = self.dbobj.to_bytes()
key=self.key
# key=msgpack.dumps(self.key)
# key=base64.b64encode(self.key.encode())
return self.collection.client.call("model_$name_set",(key,buff))
def delete(self):
key=self.key
# key=base64.b64encode(self.key.encode())
return self.collection.client.call("model_$name_del",(key))
class $NameCollection(ModelBaseCollection):
'''
    This class represents a collection of $Names
    It's used to list/find/create new instances of the $Name model object
'''
def __init__(self):
category = '$name'
namespace = ""
# instanciate the KVS interface on top of tarantool
# cl = j.clients.tarantool.client_get() # will get the tarantool from the config file, the main connection
# db = KVSTarantool(cl, category)
# mpath = j.sal.fs.getDirName(os.path.abspath(__file__)) + "/model.capnp"
# SchemaCapnp = j.data.capnp.getSchemaFromPath(mpath, name='$Name')
self.client = j.clients.tarantool.client_get() #will get the tarantool from the config file, the main connection
mpath=j.sal.fs.getDirName(os.path.abspath(__file__))+"/model.capnp"
SchemaCapnp=j.data.capnp.getSchemaFromPath(mpath,name='$Name')
super().__init__(SchemaCapnp, category=category, namespace=namespace, modelBaseClass=$NameModel, db=self.client, indexDb=self.client)
self.client.db.encoding=None
def new(self):
return $NameModel(collection=self, new=True)
def get(self,key):
resp=self.client.call("model_$name_get",key)
if len(resp.data) <= 1 and len(resp.data[0]) > 2:
raise KeyError("value for %s not found" % key)
value = resp.data[0][1]
return $NameModel(key=key,collection=self, new=False,data=value)
# BELOW IS ALL EXAMPLE CODE WHICH NEEDS TO BE REPLACED
def list(self):
resp=self.client.call("model_$name_list")
return [item.decode() for item in resp[0]]
# def list(self, actor="", service="", action="", state="", serviceKey="", fromEpoch=0, toEpoch=9999999999999,tags=[]):
# raise NotImplementedError()
# return res
# def find(self, actor="", service="", action="", state="", serviceKey="", fromEpoch=0, toEpoch=9999999999999, tags=[]):
# raise NotImplementedError()
# res = []
# for key in self.list(actor, service, action, state, serviceKey, fromEpoch, toEpoch, tags):
# if self.get(key):
# res.append(self.get(key))
# return res
|
apache-2.0
| -5,462,478,332,331,678,000
| 35.181818
| 141
| 0.618719
| false
| 3.553571
| false
| false
| false
|
dann/python-classpluggable
|
setup.py
|
1
|
1506
|
import os
import sys
from setuptools import setup, find_packages
if sys.version_info[:2] < (2, 6):
raise RuntimeError('Requires Python 2.6 or better')
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
except IOError:
README = CHANGES = ''
install_requires=[
'setuptools',
]
tests_require = install_requires + [
'nose',
]
setup(name='classpluggable',
version='0.01',
description=('classpluggable project'),
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Operating System :: Unix",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords='',
author="dann",
author_email="techmemo@gmail.com ",
url="http://github.com/dann/python-classpluggable",
license="New BSD License",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires = install_requires,
tests_require = tests_require,
test_suite = 'nose.collector',
entry_points = """\
"""
)
|
bsd-3-clause
| 6,244,364,089,358,640,000
| 27.980769
| 70
| 0.605578
| false
| 3.911688
| false
| true
| false
|
Onager/l2tdevtools
|
l2tdevtools/dependency_writers/dpkg.py
|
1
|
4671
|
# -*- coding: utf-8 -*-
"""Writer for Debian packaging (dpkg) files."""
from __future__ import unicode_literals
import io
import os
from l2tdevtools.dependency_writers import interface
class DPKGCompatWriter(interface.DependencyFileWriter):
"""Dpkg compat file writer."""
PATH = os.path.join('config', 'dpkg', 'compat')
_FILE_CONTENT = '9\n'
def Write(self):
"""Writes a dpkg control file."""
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(self._FILE_CONTENT)
class DPKGControlWriter(interface.DependencyFileWriter):
"""Dpkg control file writer."""
PATH = os.path.join('config', 'dpkg', 'control')
_PYTHON3_FILE_HEADER = [
'Source: {project_name:s}',
'Section: python',
'Priority: extra',
'Maintainer: {maintainer:s}',
'Build-Depends: debhelper (>= 9), dh-python, {build_dependencies:s}',
'Standards-Version: 4.1.4',
'X-Python3-Version: >= 3.5',
'Homepage: {homepage_url:s}',
''] # yapf: disable
_DATA_PACKAGE = [
'Package: {project_name:s}-data',
'Architecture: all',
'Depends: ${{misc:Depends}}',
'Description: Data files for {name_description:s}',
'{description_long:s}',
''] # yapf: disable
_PYTHON3_PACKAGE = [
'Package: python3-{project_name:s}',
'Architecture: all',
('Depends: {python3_dependencies:s}'
'${{python3:Depends}}, ${{misc:Depends}}'),
'Description: Python 3 module of {name_description:s}',
'{description_long:s}',
''] # yapf: disable
_TOOLS_PACKAGE = [
'Package: {project_name:s}-tools',
'Architecture: all',
('Depends: python3-{project_name:s} (>= ${{binary:Version}}), '
'${{python3:Depends}}, ${{misc:Depends}}'),
'Description: Tools of {name_description:s}',
'{description_long:s}',
''] # yapf: disable
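  # NOTE (added comment): the doubled braces in the templates above, e.g.
  # ${{misc:Depends}} and ${{binary:Version}}, are str.format() escapes; they
  # collapse to single braces like ${misc:Depends} when Write() formats the
  # assembled file_content with template_mappings.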
def Write(self):
"""Writes a dpkg control file."""
file_content = []
file_content.extend(self._PYTHON3_FILE_HEADER)
data_dependency = ''
if os.path.isdir('data'):
data_dependency = '{0:s}-data (>= ${{binary:Version}})'.format(
self._project_definition.name)
file_content.extend(self._DATA_PACKAGE)
file_content.extend(self._PYTHON3_PACKAGE)
if (os.path.isdir('scripts') or os.path.isdir('tools') or
self._project_definition.name == 'timesketch'):
file_content.extend(self._TOOLS_PACKAGE)
description_long = self._project_definition.description_long
description_long = '\n'.join(
[' {0:s}'.format(line) for line in description_long.split('\n')])
python3_dependencies = self._dependency_helper.GetDPKGDepends(
python_version=3)
if data_dependency:
python3_dependencies.insert(0, data_dependency)
python3_dependencies = ', '.join(python3_dependencies)
if python3_dependencies:
python3_dependencies = '{0:s}, '.format(python3_dependencies)
build_dependencies = ['python3-all (>= 3.5~)', 'python3-setuptools']
if self._project_definition.name == 'timesketch':
build_dependencies.insert(0, 'dh-systemd (>= 1.5)')
build_dependencies.append('python3-pip')
build_dependencies = ', '.join(build_dependencies)
template_mappings = {
'build_dependencies': build_dependencies,
'description_long': description_long,
'description_short': self._project_definition.description_short,
'homepage_url': self._project_definition.homepage_url,
'maintainer': self._project_definition.maintainer,
'name_description': self._project_definition.name_description,
'project_name': self._project_definition.name,
'python3_dependencies': python3_dependencies}
file_content = '\n'.join(file_content)
file_content = file_content.format(**template_mappings)
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(file_content)
class DPKGRulesWriter(interface.DependencyFileWriter):
"""Dpkg rules file writer."""
PATH = os.path.join('config', 'dpkg', 'rules')
_FILE_CONTENT = [
'#!/usr/bin/make -f',
'',
'%:',
'\tdh $@ --buildsystem=pybuild --with=python3',
'',
'.PHONY: override_dh_auto_test',
'override_dh_auto_test:',
'',
'']
def Write(self):
"""Writes a dpkg control file."""
template_mappings = {
'project_name': self._project_definition.name}
file_content = '\n'.join(self._FILE_CONTENT)
file_content = file_content.format(**template_mappings)
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(file_content)
|
apache-2.0
| -5,977,090,796,422,307,000
| 30.348993
| 75
| 0.627917
| false
| 3.522624
| false
| false
| false
|
redhat-cip/dci-control-server
|
tests/api/v1/test_jobs_update.py
|
1
|
1718
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def test_update_jobs(admin, remoteci_context, job_user_id, topic_user_id):
# test update schedule latest components
data = {
'name': 'pname',
'type': 'type_1',
'url': 'http://example.com/',
'topic_id': topic_user_id,
'state': 'active'}
c1 = admin.post('/api/v1/components', data=data).data['component']['id']
data.update({'type': 'type_2', 'name': 'pname1'})
c2 = admin.post('/api/v1/components', data=data).data['component']['id']
data.update({'type': 'type_3', 'name': 'pname2'})
c3 = admin.post('/api/v1/components', data=data).data['component']['id']
latest_components = {c1, c2, c3}
r = remoteci_context.post('/api/v1/jobs/%s/update' % job_user_id)
assert r.status_code == 201
update_job = r.data['job']
assert update_job['update_previous_job_id'] == job_user_id
assert update_job['topic_id'] == topic_user_id
update_cmpts = admin.get('/api/v1/jobs/%s/components' % update_job['id'])
update_cmpts = {cmpt['id'] for cmpt in update_cmpts.data['components']}
assert latest_components == update_cmpts
|
apache-2.0
| -6,182,659,063,903,803,000
| 39.904762
| 77
| 0.658324
| false
| 3.342412
| false
| false
| false
|
fossfreedom/alternative-toolbar
|
alttoolbar_sidebar.py
|
1
|
22724
|
# -*- Mode: python; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
#
# Copyright (C) 2015 - 2020 David Mohammed <fossfreedom@ubuntu.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import gettext
from alttoolbar_controller import AltControllerCategory
from alttoolbar_preferences import CoverLocale
from alttoolbar_preferences import GSetting
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import Gtk
from gi.repository import Pango
from gi.repository import RB
class AltToolbarSidebar(Gtk.TreeView):
expanders = GObject.property(type=str, default='{1:True}')
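    # NOTE (added comment): 'expanders' holds a stringified dict mapping
    # AltControllerCategory values to their expanded/collapsed state; it is
    # persisted through GSettings (see the bind in __init__) and read back with
    # eval() in the delayed() startup handler.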
def __init__(self, toolbar, rbtree):
"""
Initialises the object.
"""
super(AltToolbarSidebar, self).__init__()
self.shell = toolbar.shell
self.toolbar = toolbar
self.plugin = toolbar.plugin
self.rbtree = rbtree
self._drag_dest_source = None
self._drag_motion_counter = -1
# locale stuff
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
self.set_name("AltToolbarSideBar")
self._category = {}
self._last_click_source = None
self._user_clicked = False
gs = GSetting()
plugin_settings = gs.get_setting(gs.Path.PLUGIN)
plugin_settings.bind(gs.PluginKey.EXPANDERS, self, 'expanders',
Gio.SettingsBindFlags.DEFAULT)
# title, source, visible
self.treestore = Gtk.TreeStore.new([str, GObject.Object, bool])
self.treestore_filter = self.treestore.filter_new(root=None)
self.treestore_filter.set_visible_column(2)
self.set_model(self.treestore_filter)
context = self.get_style_context()
context.add_class(Gtk.STYLE_CLASS_SIDEBAR)
self.set_headers_visible(False)
# define the headers - not visible by default
def define_category(text, category):
local = self.treestore.append(None)
self.treestore[local] = [text, None, False]
self._category[category] = local
define_category(_("Local collection"), AltControllerCategory.LOCAL)
define_category(_("Online sources"), AltControllerCategory.ONLINE)
define_category(_("Other sources"), AltControllerCategory.OTHER)
define_category(_("Playlists"), AltControllerCategory.PLAYLIST)
def delayed(*args):
model = self.shell.props.display_page_model
rootiter = model.get_iter_first()
depth = 0
self._traverse_rows(model, rootiter, None, depth)
# switch on/off headers depending upon what's in the model
self._refresh_headers()
# tidy up syncing by connecting signals
self._connect_signals()
# now expand or collapse each expander that we have saved from a
# previous session
expanders = eval(self.expanders)
print(expanders)
print(self.expanders)
for category in expanders:
print(category)
path = self.treestore.get_path(self._category[category])
if path and expanders[category]:
# self._user_clicked = True
self.expand_row(path, False) # expanders[category])
return False
GLib.timeout_add_seconds(1, delayed)
column = Gtk.TreeViewColumn.new()
column.set_fixed_width(5)
column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
self.append_column(column)
column = Gtk.TreeViewColumn.new()
pixbuf_renderer = Gtk.CellRendererPixbuf()
column.pack_start(pixbuf_renderer, False)
renderer = Gtk.CellRendererText()
renderer.connect('edited', self.on_renderertext_edited)
self.text_renderer = renderer
column.pack_start(renderer, False)
column.set_cell_data_func(pixbuf_renderer, self._set_pixbuf)
column.set_cell_data_func(renderer, self._set_text)
self.tree_column = column
self.append_column(column)
self.set_expander_column(column)
self.show_all()
self.set_can_focus(True)
cl = CoverLocale()
cl.switch_locale(cl.Locale.RB)
def _connect_signals(self):
# display_page_model signals to keep the sidebar model in sync
model = self.shell.props.display_page_model
self._cpi = model.connect('page-inserted', self._model_page_inserted)
self._crd = model.connect('row-deleted', self._model_page_deleted)
# self._crc = model.connect('row-changed', self._model_page_changed)
# when we click on the sidebar -
# need to keep the display_page_tree in sync
self.connect('button-press-event', self._row_click)
# and visa versa
tree = self.shell.props.display_page_tree
tree.props.model.connect('row-inserted', self._tree_inserted)
tree.connect('selected',
self._display_page_tree_selected)
self.shell.props.shell_player.connect('playing-song-changed',
self._on_playing_song_changed)
# drag drop
self.enable_model_drag_dest([], Gdk.DragAction.COPY)
self.drag_dest_add_uri_targets()
self.connect('drag-drop', self.on_drag_drop)
self.connect('drag-data-received',
self.on_drag_data_received)
self.connect('drag-motion', self.on_drag_motion)
def cleanup(self):
model = self.shell.props.display_page_model
model.disconnect(self._cpi)
model.disconnect(self._crd)
# model.disconnect(self._crc)
def on_drag_drop(self, widget, context, x, y, time):
"""
Callback called when a drag operation finishes over the treeview
It decides if the dropped item can be processed.
"""
print("on_drag_drop")
# stop the propagation of the signal (deactivates superclass callback)
widget.stop_emission_by_name('drag-drop')
target = self.drag_dest_find_target(context, None)
widget.drag_get_data(context, target, time)
self._drag_dest_source = None
return True
def on_drag_motion(self, widget, drag_context, x, y, time):
path = False
try:
path, pos = widget.get_dest_row_at_pos(x, y)
except:
pass
result = False
if path and (
pos == Gtk.TreeViewDropPosition.BEFORE or pos == Gtk.TreeViewDropPosition.AFTER):
if pos == Gtk.TreeViewDropPosition.BEFORE:
drop_pos = Gtk.TreeViewDropPosition.INTO_OR_BEFORE
else:
drop_pos = Gtk.TreeViewDropPosition.INTO_OR_AFTER
widget.set_drag_dest_row(None, drop_pos)
# Gdk.drag_status(drag_context, 0, time)
path = None
if path:
dest_source = self.treestore_filter[path][1]
try:
# note - some sources dont have a can_paste method so need to
# trap this case
if not dest_source:
result = False
elif dest_source.can_paste():
result = True
except:
result = False
if dest_source and result:
if dest_source != self._drag_dest_source:
if self._drag_motion_counter != -1:
self._drag_motion_counter = 0
self._drag_dest_source = dest_source
def delayed(*args):
if self._drag_motion_counter < 2 and \
self._drag_dest_source:
self._drag_motion_counter += 1
return True
if self._drag_dest_source \
and self._drag_motion_counter >= 2:
tree = self.shell.props.display_page_tree
if tree:
tree.select(self._drag_dest_source)
self.rbtree.expand_all()
self._drag_motion_counter = -1
return False
if self._drag_motion_counter == -1:
self._drag_motion_counter = 0
GLib.timeout_add_seconds(1, delayed)
if result:
Gdk.drag_status(drag_context, Gdk.DragAction.COPY, time)
else:
Gdk.drag_status(drag_context, 0, time)
self._drag_dest_source = None
return not result
def on_drag_data_received(self, widget, drag_context, x, y, data, info,
time):
"""
Callback called when the drag source has prepared the data (pixbuf)
for us to use.
"""
print("on_drag_data_received")
# stop the propagation of the signal (deactivates superclass callback)
widget.stop_emission_by_name('drag-data-received')
path, pos = widget.get_dest_row_at_pos(x, y)
dest_source = self.treestore_filter[path][1]
drag_context.finish(True, False, time)
uris = data.get_uris()
entries = []
for uri in uris:
entry = self.shell.props.db.entry_lookup_by_location(uri)
if entry:
entries.append(entry)
dest_source.paste(entries)
def _on_playing_song_changed(self, *args):
"""
signal when a playing song changes - need to invoke a tree-refresh
to ensure the user can see which source
:param args:
:return:
"""
print("playing song changed")
        if hasattr(self.plugin, "db"):  # curious crash when exiting - let's not
# send the queue_draw in this case
print("queuing")
self.queue_draw()
def on_renderertext_edited(self, renderer, path, new_text):
print("edited")
print(path)
print(new_text)
self.treestore_filter[path][1].props.name = new_text
def _traverse_rows(self, store, treeiter, new_parent_iter, depth):
while treeiter is not None:
# print(depth, store[treeiter][1])
# print(depth, store[treeiter][1].props.name)
if isinstance(store[treeiter][1], RB.DisplayPageGroup):
if store.iter_has_child(treeiter):
childiter = store.iter_children(treeiter)
self._traverse_rows(store, childiter, treeiter, depth)
treeiter = store.iter_next(treeiter)
continue
if depth == 0:
category_iter = self._get_category_iter(store[treeiter][1])
leaf_iter = self.treestore.append(category_iter)
else:
leaf_iter = self.treestore.append(new_parent_iter)
self.treestore[leaf_iter][1] = store[treeiter][1]
self.treestore[leaf_iter][0] = ""
self.treestore[leaf_iter][2] = True
if store.iter_has_child(treeiter):
childiter = store.iter_children(treeiter)
self._traverse_rows(store, childiter, leaf_iter, depth + 1)
treeiter = store.iter_next(treeiter)
# def _model_page_changed(self, model, path, page_iter):
# print(model[page_iter][1].props.name)
# print(path)
# # self._model_page_inserted(model, path, page_iter)
def _tree_inserted(self, model, path, page_iter):
print(path)
print(page_iter)
print(model[path][1].props.name)
print(model[path][1])
self._model_page_inserted(model, model[path][1], page_iter)
def _model_page_inserted(self, model, page, page_iter):
if page and not page.props.visibility:
return # we don't display sources that are marked as hidden
print(page)
print(page_iter)
parent_iter = model.iter_parent(page_iter)
print(parent_iter)
def find_lookup_rows(store, treeiter, page):
while treeiter is not None:
found_page = store[treeiter][1]
print(found_page)
if found_page is not None and found_page == page:
# print("found %s" % found_page.props.name)
return treeiter
if store.iter_has_child(treeiter):
childiter = store.iter_children(treeiter)
ret = find_lookup_rows(store, childiter, page)
if ret:
return ret
treeiter = store.iter_next(treeiter)
print("nothing found")
return None
# first check if we've already got the page in the model
rootiter = self.treestore.get_iter_first()
if find_lookup_rows(self.treestore, rootiter, page):
return
if (parent_iter and isinstance(model[parent_iter][1],
RB.DisplayPageGroup)) or \
not parent_iter:
# the parent of the inserted row is a top-level item in the
# display-page-model
# print("top level")
category_iter = self._get_category_iter(page)
leaf_iter = self.treestore.append(category_iter)
else:
# the parent is another source so we need to find the iter in our
# model to hang it off
# print("child level")
searchpage = model[parent_iter][1]
# print("####", searchpage)
leaf_iter = find_lookup_rows(self.treestore, rootiter, searchpage)
# print("##2", leaf_iter)
leaf_iter = self.treestore.append(leaf_iter)
self.treestore[leaf_iter][1] = page
self.treestore[leaf_iter][0] = ""
self.treestore[leaf_iter][2] = True
self._refresh_headers()
if "PlaylistSource" in type(page).__name__:
            # a playlist of some sort has been added - so let's put the user into
# edit mode
self.edit_playlist(leaf_iter)
self.rbtree.expand_all()
def edit_playlist(self, leaf_iter):
"""
edit the playlist
:param leaf_iter: treestore iter
:return:
"""
print("edit_playlist")
self.text_renderer.props.editable = True
path = self.treestore.get_path(leaf_iter)
path = self.treestore_filter.convert_child_path_to_path(path)
print(path)
self.grab_focus()
def delayed(*args):
self.set_cursor_on_cell(path,
self.tree_column, self.text_renderer, True)
GLib.timeout_add_seconds(1, delayed, None)
def _model_page_deleted(self, model, path):
"""
        signal from the display page model - we don't actually know what was
        deleted ... just that something has been
:param model:
:param path:
:return:
"""
# first do a reverse lookup so that we can search quicker later
# dict of sources in the sidebar model with their treeiter
lookup = {}
rootiter = self.treestore.get_iter_first()
def find_lookup_rows(store, treeiter):
while treeiter is not None:
# if store[treeiter][0] == "":
# lookup[store[treeiter][1]] = treeiter
if store[treeiter][1] is not None:
lookup[store[treeiter][1]] = treeiter
if store.iter_has_child(treeiter):
childiter = store.iter_children(treeiter)
find_lookup_rows(store, childiter)
treeiter = store.iter_next(treeiter)
find_lookup_rows(self.treestore, rootiter)
# next iterate through the displaytreemodel - where we have a matching
# source, delete it from our lookup
def find_rows(store, treeiter):
while treeiter is not None:
if store[treeiter][1] in lookup:
del lookup[store[treeiter][1]]
if store.iter_has_child(treeiter):
childiter = store.iter_children(treeiter)
find_rows(store, childiter)
treeiter = store.iter_next(treeiter)
rootiter = model.get_iter_first()
find_rows(model, rootiter)
        # whatever is left in the lookup is what we need to remove from our treeview
# (treestore)
for source in lookup:
self.treestore.remove(lookup[source])
self._refresh_headers()
def _row_click(self, widget, event):
"""
event called when clicking on a row
"""
print('_row_click')
try:
treepath, treecolumn, cellx, celly = \
widget.get_path_at_pos(event.x, event.y)
except:
print("exit")
return
active_object = self.treestore_filter[treepath][1]
print(active_object)
if active_object:
# we have a source
self._user_clicked = True
self.shell.props.display_page_tree.select(active_object)
self.rbtree.expand_all()
if self._last_click_source == active_object:
self.text_renderer.props.editable = \
"PlaylistSource" in type(active_object).__name__
else:
self.text_renderer.props.editable = False
self._last_click_source = active_object
def delayed(*args):
# save current state of each category in the treeview
cat_vals = {}
for category in self._category:
path = self.treestore.get_path(self._category[category])
if path:
cat_vals[category] = self.row_expanded(path)
self.expanders = str(cat_vals)
print(self.expanders)
GLib.timeout_add_seconds(1, delayed)
def _display_page_tree_selected(self, display_page_tree, page):
"""
signal from when a page is selected in the display-page-tree -
we need to sync with our tree
:param display_page_tree:
:param page:
:return:
"""
if self._user_clicked:
self._user_clicked = False
return
# first do a reverse lookup so that we can search quicker later
# dict of sources in the sidebar model with their treeiter
lookup = {}
rootiter = self.treestore_filter.get_iter_first()
def find_lookup_rows(store, treeiter):
while treeiter is not None:
if store[treeiter][1] is not None:
lookup[store[treeiter][1]] = treeiter
print(store[treeiter][1].props.name)
if store.iter_has_child(treeiter):
childiter = store.iter_children(treeiter)
find_lookup_rows(store, childiter)
treeiter = store.iter_next(treeiter)
find_lookup_rows(self.treestore_filter, rootiter)
if page in lookup:
path = self.treestore_filter.get_path(lookup[page])
self.expand_to_path(path)
self.set_cursor(path)
def _set_text(self, column, renderer, model, treeiter, arg):
if treeiter is None:
return
if model is None:
return
source = model[treeiter][1]
if source is None:
renderer.props.weight = Pango.Weight.BOLD
renderer.props.text = model[treeiter][0]
print(renderer.props.text)
renderer.props.visible = model[treeiter][2]
else:
renderer.props.visible = True
player = self.shell.props.shell_player
playing = \
player.get_playing and player.get_playing_source() == source
if (source.props.name):
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
translation = gettext.gettext(source.props.name)
cl.switch_locale(cl.Locale.RB)
renderer.props.text = translation
else:
renderer.props.text = ""
if playing:
renderer.props.weight = Pango.Weight.BOLD
else:
renderer.props.weight = Pango.Weight.NORMAL
renderer.props.ypad = 3
path = model.get_path(treeiter)
if path.get_depth() == 1:
renderer.props.ypad = 6
renderer.props.xpad = 3
else:
renderer.props.ypad = 3
renderer.props.xpad = 0
renderer.props.ellipsize = Pango.EllipsizeMode.END
def _refresh_headers(self):
treeiter = self.treestore.get_iter_first()
while treeiter is not None:
self.treestore[treeiter][2] = \
self.treestore.iter_has_child(treeiter)
treeiter = self.treestore.iter_next(treeiter)
def _set_pixbuf(self, column, renderer, model, treeiter, arg):
source = model[treeiter][1]
if source is None:
renderer.props.pixbuf = None
else:
ret_bool, controller = self.toolbar.is_controlled(source)
renderer.props.gicon = controller.get_gicon(source)
renderer.props.follow_state = True
path = model.get_path(treeiter)
if path.get_depth() == 2:
renderer.props.visible = True # must be a child so show the
# pixbuf renderer
else:
renderer.props.visible = False # headers or children of child
# dont have pixbuf's so no renderer to display
renderer.props.xpad = 3
def _get_category_iter(self, source):
ret_bool, controller = self.toolbar.is_controlled(source)
category = AltControllerCategory.OTHER
if ret_bool:
category = controller.get_category()
return self._category[category]
|
gpl-3.0
| -7,948,833,192,442,559,000
| 34.673469
| 97
| 0.573271
| false
| 4.070213
| false
| false
| false
|
ebt-hpc/cca
|
cca/scripts/outlining_queries_fortran.py
|
1
|
19126
|
#!/usr/bin/env python3
'''
A script for outlining Fortran programs
Copyright 2013-2018 RIKEN
Copyright 2018-2019 Chiba Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'Masatomo Hashimoto <m.hashimoto@stair.center>'
from ns import NS_TBL
OMITTED = ['execution-part','do-block']
SUBPROGS = set([
'subroutine-external-subprogram',
'subroutine-internal-subprogram',
'subroutine-module-subprogram',
'function-external-subprogram',
'function-internal-subprogram',
'function-module-subprogram',
])
LOOPS = set(['do-construct','do-stmt','end-do-stmt','do-block'])
CALLS = set(['call-stmt','function-reference','part-name','call-stmt*','mpi-call'])
TYPE_TBL = { # cat -> type
'file' : 'file',
'do-construct' : 'loop',
'if-construct' : 'branch',
'case-construct' : 'branch',
'select-type-construct' : 'branch',
'where-construct' : 'branch',
'call-stmt' : 'call',
'function-reference' : 'call',
'part-name' : 'call',
'main-program' : 'main',
'subroutine-external-subprogram' : 'subroutine',
'subroutine-internal-subprogram' : 'subroutine',
'subroutine-module-subprogram' : 'subroutine',
'function-external-subprogram' : 'function',
'function-internal-subprogram' : 'function',
'function-module-subprogram' : 'function',
'execution-part' : 'part',
'if-then-block' : 'block',
'else-if-block' : 'block',
'else-block' : 'block',
'case-block' : 'block',
'type-guard-block' : 'block',
'where-block' : 'block',
'do-block' : 'block',
'block-construct' : 'block',
'pp-branch' : 'pp',
'pp-branch-do' : 'pp',
'pp-branch-end-do' : 'pp',
'pp-branch-if' : 'pp',
'pp-branch-end-if' : 'pp',
'pp-branch-forall' : 'pp',
'pp-branch-end-forall' : 'pp',
'pp-branch-select' : 'pp',
'pp-branch-end-select' : 'pp',
'pp-branch-where' : 'pp',
'pp-branch-end-where' : 'pp',
'pp-branch-pu' : 'pp',
'pp-branch-end-pu' : 'pp',
'pp-branch-function' : 'pp',
'pp-branch-end-function' : 'pp',
'pp-branch-subroutine' : 'pp',
'pp-branch-end-subroutine' : 'pp',
'pp-section-elif' : 'pp',
'pp-section-else' : 'pp',
'pp-section-if' : 'pp',
'pp-section-ifdef' : 'pp',
'pp-section-ifndef' : 'pp',
'mpi-call' : 'mpi',
'call-stmt*' : 'call*'
}
Q_AA_IN_LOOP_F = '''DEFINE input:inference "ont.cpi"
PREFIX f: <%(f_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?loop ?aa ?pn ?dtor ?dtor_loc
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?pn ?aa ?loop ?pu_name ?vpu_name ?loc ?ver
WHERE {
?pn a f:PartName ;
src:parent ?aa .
?aa a f:ArrayAccess ;
f:inDoConstruct ?loop .
?loop a f:DoConstruct ;
f:inProgramUnitOrSubprogram ?pu_or_sp ;
f:inProgramUnit ?pu .
?pu_or_sp src:inFile/src:location ?loc .
?pu a f:ProgramUnit ;
src:inFile/src:location ?pu_loc ;
ver:version ?ver .
OPTIONAL {
?pu f:name ?pu_name
}
OPTIONAL {
?pu f:includedInProgramUnit ?vpu .
?vpu f:name ?vpu_name .
}
} GROUP BY ?pn ?aa ?loop ?pu_name ?vpu_name ?loc ?ver
}
OPTIONAL {
?pn f:declarator ?dtor .
?dtor a f:Declarator ;
f:inProgramUnitOrFragment/src:inFile ?dtor_file .
?dtor_file a src:File ;
src:location ?dtor_loc ;
ver:version ?ver .
}
}
}
''' % NS_TBL
Q_OTHER_CALLS_F = '''DEFINE input:inference "ont.cpi"
PREFIX f: <%(f_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?sp ?sp_cat ?sub ?main ?prog ?call ?callee_name ?constr
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?pu ?pu_name ?vpu_name ?call ?callee_name
WHERE {
?call a f:CallStmt ;
f:name ?callee_name ;
f:inProgramUnitOrSubprogram ?pu_or_sp ;
f:inProgramUnit ?pu .
?pu_or_sp src:inFile/src:location ?loc .
?pu a f:ProgramUnit ;
src:inFile/src:location ?pu_loc ;
ver:version ?ver .
OPTIONAL {
?pu f:name ?pu_name
}
OPTIONAL {
?pu f:includedInProgramUnit ?vpu .
?vpu f:name ?vpu_name .
}
FILTER NOT EXISTS {
?call f:mayCall ?callee .
}
} GROUP BY ?ver ?loc ?pu ?pu_name ?vpu_name ?call ?callee_name
}
OPTIONAL {
?call f:inSubprogram ?sp .
?sp a f:Subprogram ;
a ?sp_cat0 OPTION (INFERENCE NONE) ;
f:name ?sub .
FILTER NOT EXISTS {
?call f:inSubprogram ?sp0 .
?sp0 f:inSubprogram ?sp .
FILTER (?sp != ?sp0)
}
GRAPH <http://codinuum.com/ont/cpi> {
?sp_cat0 rdfs:label ?sp_cat
}
}
OPTIONAL {
?call f:inContainerUnit ?constr .
?constr a f:ContainerUnit .
FILTER EXISTS {
{
?constr f:inProgramUnit ?pu .
FILTER NOT EXISTS {
?call f:inSubprogram/f:inContainerUnit ?constr .
}
}
UNION
{
?call f:inSubprogram ?sp0 .
?constr f:inSubprogram ?sp0 .
}
}
FILTER NOT EXISTS {
?c a f:ContainerUnit ;
f:inContainerUnit ?constr .
?call f:inContainerUnit ?c .
FILTER (?c != ?constr)
}
}
OPTIONAL {
?call f:inMainProgram ?main .
?main a f:MainProgram .
OPTIONAL {
?main f:name ?prog .
}
}
}
}
''' % NS_TBL
Q_DIRECTIVES_F = '''DEFINE input:inference "ont.cpi"
PREFIX f: <%(f_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?sp ?sp_cat ?sub ?main ?prog ?dtv ?cat ?constr
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?pu ?pu_name ?vpu_name ?dtv ?cat
WHERE {
?dtv a f:CompilerDirective ;
a ?cat0 OPTION (INFERENCE NONE) ;
f:inProgramUnitOrSubprogram ?pu_or_sp ;
f:inProgramUnit ?pu .
?pu_or_sp src:inFile/src:location ?loc .
?pu a f:ProgramUnit ;
src:inFile/src:location ?pu_loc ;
ver:version ?ver .
OPTIONAL {
?pu f:name ?pu_name
}
OPTIONAL {
?pu f:includedInProgramUnit ?vpu .
?vpu f:name ?vpu_name .
}
GRAPH <http://codinuum.com/ont/cpi> {
?cat0 rdfs:label ?cat .
}
} GROUP BY ?ver ?loc ?pu ?pu_name ?vpu_name ?dtv ?cat
}
OPTIONAL {
?dtv f:inSubprogram ?sp .
?sp a f:Subprogram ;
a ?sp_cat0 OPTION (INFERENCE NONE) ;
f:name ?sub .
FILTER NOT EXISTS {
?dtv f:inSubprogram ?sp0 .
?sp0 f:inSubprogram ?sp .
FILTER (?sp != ?sp0)
}
GRAPH <http://codinuum.com/ont/cpi> {
?sp_cat0 rdfs:label ?sp_cat
}
}
OPTIONAL {
?dtv f:inContainerUnit ?constr .
?constr a f:ContainerUnit .
FILTER EXISTS {
{
?constr f:inProgramUnit ?pu .
FILTER NOT EXISTS {
?dtv f:inSubprogram/f:inContainerUnit ?constr .
}
}
UNION
{
?dtv f:inSubprogram ?sp0 .
?constr f:inSubprogram ?sp0 .
}
}
FILTER NOT EXISTS {
?c a f:ContainerUnit ;
f:inContainerUnit ?constr .
?dtv f:inContainerUnit ?c .
FILTER (?c != ?constr)
}
}
OPTIONAL {
?dtv f:inMainProgram ?main .
?main a f:MainProgram .
OPTIONAL {
?main f:name ?prog .
}
}
}
}
''' % NS_TBL
Q_CONSTR_CONSTR_F = '''DEFINE input:inference "ont.cpi"
PREFIX f: <%(f_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?sp ?sp_cat ?sub ?main ?prog
?constr ?cat
?parent_constr ?parent_cat ?parent_sub ?parent_prog ?parent_pu_name ?parent_vpu_name
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?pu ?pu_name ?vpu_name ?constr
WHERE {
?constr a f:ContainerUnit ;
f:inProgramUnitOrSubprogram ?pu_or_sp ;
f:inProgramUnit ?pu .
?pu_or_sp src:inFile/src:location ?loc .
?pu a f:ProgramUnit ;
src:inFile/src:location ?pu_loc ;
ver:version ?ver .
OPTIONAL {
?pu f:name ?pu_name
}
OPTIONAL {
?pu f:includedInProgramUnit ?vpu .
?vpu f:name ?vpu_name .
}
} GROUP BY ?ver ?loc ?pu ?pu_name ?vpu_name ?constr
}
OPTIONAL {
SELECT DISTINCT ?constr (GROUP_CONCAT(DISTINCT ?c; SEPARATOR="&") AS ?cat)
WHERE {
?constr a ?cat0 OPTION (INFERENCE NONE) .
GRAPH <http://codinuum.com/ont/cpi> {
?cat0 rdfs:label ?c .
}
} GROUP BY ?constr
}
OPTIONAL {
?constr f:inSubprogram ?sp .
?sp a f:Subprogram ;
a ?sp_cat0 OPTION (INFERENCE NONE) ;
f:name ?sub .
FILTER NOT EXISTS {
?constr f:inSubprogram ?sp0 .
?sp0 f:inSubprogram ?sp .
FILTER (?sp != ?sp0)
}
# FILTER NOT EXISTS {
# ?constr f:inMainProgram ?m0 .
# ?m0 f:inContainerUnit ?parent_constr .
# FILTER (?m0 != ?constr && ?m0 != ?parent_constr)
# }
GRAPH <http://codinuum.com/ont/cpi> {
?sp_cat0 rdfs:label ?sp_cat .
}
}
OPTIONAL {
?constr f:inMainProgram ?main .
?main a f:MainProgram .
OPTIONAL {
?main f:name ?prog .
}
}
OPTIONAL {
?constr f:inContainerUnit ?parent_constr .
?parent_constr a f:ContainerUnit .
FILTER (?constr != ?parent_constr)
FILTER NOT EXISTS {
?constr f:inContainerUnit ?p0 .
?p0 a f:ContainerUnit ;
f:inContainerUnit ?parent_constr .
FILTER (?p0 != ?constr && ?p0 != ?parent_constr)
}
FILTER NOT EXISTS {
?constr f:inSubprogram ?sp0 .
?sp0 f:inContainerUnit ?parent_constr .
FILTER (?sp0 != ?constr && ?sp0 != ?parent_constr)
}
{
SELECT DISTINCT ?parent_constr (GROUP_CONCAT(DISTINCT ?c0; SEPARATOR="&") AS ?parent_cat)
WHERE {
?parent_constr a ?parent_cat0 OPTION (INFERENCE NONE) .
GRAPH <http://codinuum.com/ont/cpi> {
?parent_cat0 rdfs:label ?c0 .
}
} GROUP BY ?parent_constr
}
OPTIONAL {
?parent_constr f:inProgramUnit ?parent_pu .
?parent_pu f:name ?parent_pu_name .
}
OPTIONAL {
?parent_constr f:inProgramUnit/f:includedInProgramUnit ?parent_vpu .
?parent_vpu f:name ?parent_vpu_name .
}
OPTIONAL {
?parent_constr f:inMainProgram ?parent_main .
?parent_main a f:MainProgram .
OPTIONAL {
?parent_main f:name ?parent_prog .
}
}
OPTIONAL {
?parent_constr f:inSubprogram ?parent_sp .
?parent_sp a f:Subprogram ;
f:name ?parent_sub .
FILTER NOT EXISTS {
?parent_constr f:inSubprogram ?sp0 .
?sp0 f:inSubprogram ?parent_sp .
FILTER (?parent_sp != ?sp0)
}
}
}
}
}
''' % NS_TBL
Q_CONSTR_SP_F = '''DEFINE input:inference "ont.cpi"
PREFIX f: <%(f_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?sp ?sp_cat ?sub ?main ?prog
?constr ?cat ?call ?call_cat
?callee ?callee_name ?callee_loc ?callee_cat ?callee_pu_name
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?constr ?callee ?cat ?call ?call_cat
WHERE {
?call a ?call_cat0 OPTION (INFERENCE NONE) ;
f:inContainerUnit ?constr ;
f:mayCall ?callee .
FILTER (?call_cat0 IN (f:CallStmt, f:FunctionReference, f:PartName))
?constr a f:ContainerUnit ;
a ?cat0 OPTION (INFERENCE NONE) ;
f:inProgramUnitOrSubprogram ?pu_or_sp ;
f:inProgramUnit ?pu .
?pu_or_sp src:inFile/src:location ?loc .
FILTER NOT EXISTS {
?c a f:ContainerUnit ;
f:inContainerUnit+ ?constr .
?call f:inContainerUnit+ ?c .
FILTER (?c != ?constr)
}
?pu a f:ProgramUnit ;
ver:version ?ver ;
src:inFile/src:location ?pu_loc .
OPTIONAL {
?pu f:name ?pu_name
}
OPTIONAL {
?pu f:includedInProgramUnit ?vpu .
?vpu f:name ?vpu_name .
}
GRAPH <http://codinuum.com/ont/cpi> {
?cat0 rdfs:label ?cat .
?call_cat0 rdfs:label ?call_cat .
}
} GROUP BY ?ver ?loc ?pu_name ?vpu_name ?constr ?callee ?cat ?call ?call_cat
}
{
SELECT DISTINCT ?callee ?callee_cat ?callee_loc ?ver ?callee_pu_name
(GROUP_CONCAT(DISTINCT ?cn; SEPARATOR=":") AS ?callee_name)
WHERE {
?callee a f:Subprogram ;
a ?callee_cat0 OPTION (INFERENCE NONE) ;
f:name ?cn ;
src:inFile ?callee_file .
?callee_file a src:File ;
src:location ?callee_loc ;
ver:version ?ver .
GRAPH <http://codinuum.com/ont/cpi> {
?callee_cat0 rdfs:label ?callee_cat
}
OPTIONAL {
?callee f:inProgramUnit/f:name ?callee_pu_name .
}
} GROUP BY ?callee ?callee_cat ?callee_loc ?ver ?callee_pu_name
}
OPTIONAL {
?constr f:inSubprogram ?sp .
?sp a f:Subprogram ;
a ?sp_cat0 OPTION (INFERENCE NONE) ;
f:name ?sub .
FILTER NOT EXISTS {
?constr f:inSubprogram ?sp0 .
?sp0 f:inSubprogram ?sp .
FILTER (?sp != ?sp0)
}
GRAPH <http://codinuum.com/ont/cpi> {
?sp_cat0 rdfs:label ?sp_cat
}
}
OPTIONAL {
?constr f:inMainProgram ?main .
?main a f:MainProgram .
OPTIONAL {
?main f:name ?prog .
}
}
}
}
''' % NS_TBL
Q_SP_SP_F = '''DEFINE input:inference "ont.cpi"
PREFIX f: <%(f_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?sp ?sp_cat ?sub ?main ?prog
?callee ?callee_name ?callee_loc ?callee_cat ?call ?call_cat ?constr ?callee_pu_name
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?pu ?pu_name ?vpu_name ?callee ?call ?call_cat
WHERE {
?call a ?call_cat0 OPTION (INFERENCE NONE) ;
f:inProgramUnitOrSubprogram ?pu_or_sp ;
f:inProgramUnit ?pu ;
f:mayCall ?callee .
?pu_or_sp src:inFile/src:location ?loc .
?pu a f:ProgramUnit ;
src:inFile/src:location ?pu_loc ;
ver:version ?ver .
OPTIONAL {
?pu f:name ?pu_name
}
OPTIONAL {
?pu f:includedInProgramUnit ?vpu .
?vpu f:name ?vpu_name .
}
FILTER (?call_cat0 IN (f:CallStmt, f:FunctionReference, f:PartName))
FILTER NOT EXISTS {
?call f:inContainerUnit [] .
}
GRAPH <http://codinuum.com/ont/cpi> {
?call_cat0 rdfs:label ?call_cat .
}
} GROUP BY ?ver ?loc ?pu ?pu_name ?vpu_name ?callee ?call ?call_cat
}
{
SELECT DISTINCT ?callee ?callee_cat ?callee_loc ?ver ?callee_pu_name
(GROUP_CONCAT(DISTINCT ?cn; SEPARATOR=":") AS ?callee_name)
WHERE {
?callee a f:Subprogram ;
a ?callee_cat0 OPTION (INFERENCE NONE) ;
f:name ?cn ;
src:inFile ?callee_file .
?callee_file a src:File ;
src:location ?callee_loc ;
ver:version ?ver .
GRAPH <http://codinuum.com/ont/cpi> {
?callee_cat0 rdfs:label ?callee_cat
}
OPTIONAL {
?callee f:inProgramUnit/f:name ?callee_pu_name .
}
} GROUP BY ?callee ?callee_cat ?callee_loc ?ver ?callee_pu_name
}
OPTIONAL {
?call f:inSubprogram ?sp .
?sp a f:Subprogram ;
a ?sp_cat0 OPTION (INFERENCE NONE) ;
f:name ?sub .
FILTER NOT EXISTS {
?call f:inSubprogram ?sp0 .
?sp0 f:inSubprogram ?sp .
FILTER (?sp != ?sp0)
}
GRAPH <http://codinuum.com/ont/cpi> {
?sp_cat0 rdfs:label ?sp_cat
}
}
OPTIONAL {
?call f:inMainProgram ?main .
?main a f:MainProgram .
OPTIONAL {
?main f:name ?prog .
}
}
}
}
''' % NS_TBL
Q_CONSTR_QSPN_F = '''DEFINE input:inference "ont.cpi"
PREFIX f: <%(f_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?qspn ?constr
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?pu_name ?vpu_name ?sp0 ?constr
(GROUP_CONCAT(DISTINCT CONCAT(STR(?dist), ?n); SEPARATOR=",") AS ?qspn)
WHERE {
?constr a f:ContainerUnit ;
f:inSubprogram ?sp0 ;
f:inProgramUnit ?pu .
?sp0 src:inFile/src:location ?loc .
FILTER NOT EXISTS {
?constr f:inSubprogram/f:inSubprogram ?sp0 .
}
?pu a f:ProgramUnit ;
src:inFile/src:location ?pu_loc ;
ver:version ?ver .
OPTIONAL {
?pu f:name ?pu_name .
}
OPTIONAL {
?pu f:includedInProgramUnit ?vpu .
?vpu f:name ?vpu_name .
}
?sp0 a f:Subprogram ;
f:name ?sp0_name .
?spx f:name ?n .
{
SELECT ?x ?sp
WHERE {
?x a f:Subprogram ;
f:inSubprogram ?sp .
}
} OPTION(TRANSITIVE,
T_IN(?x),
T_OUT(?sp),
T_DISTINCT,
T_MIN(0),
T_NO_CYCLES,
T_STEP (?x) AS ?spx,
T_STEP ('step_no') AS ?dist
)
FILTER (?x = ?sp0)
} GROUP BY ?ver ?loc ?sp0 ?constr ?pu_name ?vpu_name
}
}
}
''' % NS_TBL
QUERY_TBL = {
'aa_in_loop' : Q_AA_IN_LOOP_F,
'other_calls' : Q_OTHER_CALLS_F,
'directives' : Q_DIRECTIVES_F,
'constr_constr' : Q_CONSTR_CONSTR_F,
'constr_sp' : Q_CONSTR_SP_F,
'sp_sp' : Q_SP_SP_F,
'constr_qspn' : Q_CONSTR_QSPN_F,
}
def get_root_entities(full=False):
s = set(['main-program'])
if full:
s |= set([
'subroutine-external-subprogram',
'subroutine-module-subprogram',
'function-external-subprogram',
'function-module-subprogram',
])
return s
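# A minimal usage sketch: the QUERY_TBL templates above are already expanded
# against NS_TBL, so the only remaining placeholder is "%(proj)s" (written as
# "%%(proj)s" in the sources). The project graph URI below is made up.
if __name__ == '__main__':
    sample_query = QUERY_TBL['aa_in_loop'] % {'proj': 'http://example.org/projects/sample'}
    print(sample_query.splitlines()[0])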
|
apache-2.0
| -5,018,884,159,477,743,000
| 23.583548
| 100
| 0.536547
| false
| 3.068999
| false
| false
| false
|
kpbochenek/empireofcode
|
non_unique.py
|
1
|
1121
|
# kpbochenek@gmail.com
def non_unique(data):
result = []
    diff = 32  # ASCII distance between an upper-case letter and its lower-case form
for d in data:
if data.count(d) > 1:
result.append(d)
        # for letters, the opposite-case form also counts toward the duplicate total
        elif type(d) is str and data.count(d) + data.count(chr(ord(d) + diff)) > 1:
result.append(d)
elif type(d) is str and data.count(d) + data.count(chr(ord(d) - diff)) > 1:
result.append(d)
return result
if __name__ == "__main__":
# These "asserts" using only for self-checking and not necessary for auto-testing
# Rank 1
assert isinstance(non_unique([1]), list), "The result must be a list"
assert non_unique([1, 2, 3, 1, 3]) == [1, 3, 1, 3], "1st example"
assert non_unique([1, 2, 3, 4, 5]) == [], "2nd example"
assert non_unique([5, 5, 5, 5, 5]) == [5, 5, 5, 5, 5], "3rd example"
assert non_unique([10, 9, 10, 10, 9, 8]) == [10, 9, 10, 10, 9], "4th example"
# Rank 2
assert non_unique(['P', 7, 'j', 'A', 'P', 'N', 'Z', 'i',
'A', 'X', 'j', 'L', 'y', 's', 'K', 'g',
'p', 'r', 7, 'b']) == ['P', 7, 'j', 'A', 'P', 'A', 'j', 'p', 7], "Letters"
|
apache-2.0
| -4,089,713,566,800,570,000
| 39.035714
| 97
| 0.484389
| false
| 2.7543
| false
| false
| false
|
cloud9ers/j25framework
|
j25/loaders/reloader.py
|
1
|
5047
|
# Python Module Reloader
#
# Copyright (c) 2009, 2010 Jon Parise <jon@indelible.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import Importer
import j25
import logging
import os
import sys
import threading
import time
try:
import queue
except ImportError:
#python 2.x
import Queue as queue
_win32 = (sys.platform == 'win32')
logger = logging.getLogger("Module Reloader")
class ModuleMonitor(threading.Thread):
"""Monitor module source file changes"""
def __init__(self, interval=1):
threading.Thread.__init__(self)
self.daemon = True
self.mtimes = {}
self.queue = queue.Queue()
self.interval = interval
self.is_running = True
def terminate(self):
self.is_running = False
def run(self):
while self.is_running:
self._scan()
time.sleep(self.interval)
logger.info("ModuleMonitor terminated")
def _scan(self):
# We're only interested in file-based modules (not C extensions).
# We are only interested in project files changes
modules = [m.__file__ for m in sys.modules.values()
if m and '__file__' in m.__dict__ and m.__file__.startswith(j25.project_directory)]
for filename in modules:
# We're only interested in the source .py files.
if filename.endswith('.pyc') or filename.endswith('.pyo'):
filename = filename[:-1]
# stat() the file. This might fail if the module is part of a
# bundle (.egg). We simply skip those modules because they're
# not really reloadable anyway.
try:
stat = os.stat(filename)
except OSError:
continue
# Check the modification time. We need to adjust on Windows.
mtime = stat.st_mtime
if _win32:
mtime -= stat.st_ctime
# Check if we've seen this file before. We don't need to do
# anything for new files.
if filename in self.mtimes:
# If this file's mtime has changed, queue it for reload.
if mtime != self.mtimes[filename]:
print "file %s enqueued" % filename
self.queue.put(filename)
# Record this filename's current mtime.
self.mtimes[filename] = mtime
class Reloader(threading.Thread):
def __init__(self, interval=1):
threading.Thread.__init__(self)
self.monitor = ModuleMonitor(interval=interval)
self.monitor.start()
self.interval = interval
self.is_running = True
logging.info("Module Monitor Started")
def run(self):
self._logger = logging.getLogger("Reloader")
while self.is_running:
self.poll()
time.sleep(self.interval)
self.monitor.terminate()
self._logger.info("Module Reloader terminated")
def terminate(self):
self.is_running = False
def poll(self):
filenames = set()
while not self.monitor.queue.empty():
try:
filename = self.monitor.queue.get_nowait()
filenames.add(filename)
except queue.Empty:
break
if filenames:
self._reload(filenames)
def _check(self, filenames, module):
mod_file = getattr(module, '__file__', None)
if mod_file:
for filename in filenames:
if mod_file.startswith(filename):
return True
return False
def _reload(self, filenames):
modules = [m for m in sys.modules.values()
if self._check(filenames, m)]
for mod in modules:
self._logger.info("Reloading module %s", mod.__name__)
Importer.reload(mod)
        else:
            # for/else with no break: this block always runs after the loop
            # completes, refreshing the j25 routing once any reloads are done.
j25._load_routing()
j25._update_mapper()
j25._dispatcher.register_all_apps_router()
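# A minimal usage sketch, assuming a running j25 application (the monitor only
# watches files under j25.project_directory):
#
#     reloader = Reloader(interval=1)
#     reloader.start()      # poll for changed project modules every second
#     ...
#     reloader.terminate()  # ask the reloader (and its monitor) to stop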
|
lgpl-3.0
| 8,412,264,403,082,464,000
| 33.813793
| 99
| 0.607886
| false
| 4.486222
| false
| false
| false
|
sxjscience/tvm
|
python/tvm/topi/nn/dilate.py
|
1
|
2528
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dilation operators"""
import tvm
from tvm import te
from .. import util
from .. import tag
@te.tag_scope(tag=tag.INJECTIVE + ",dilate")
def dilate(data, strides, dilation_value=0.0, name="DilatedInput"):
"""Dilate data with given dilation value (0 by default).
Parameters
----------
data : tvm.te.Tensor
n-D, can be any layout.
strides : list / tuple of n ints
Dilation stride on each dimension, 1 means no dilation.
dilation_value : int/float, optional
Value used to dilate the input.
name : str, optional
The name prefix operators generated
Returns
-------
Output : tvm.te.Tensor
n-D, the same layout as data.
"""
n = len(data.shape)
if len(strides) != n:
raise ValueError("data dimension and strides size dismatch : %d vs %d" % (n, len(strides)))
ana = tvm.arith.Analyzer()
out_shape = tuple(ana.simplify((data.shape[i] - 1) * strides[i] + 1) for i in range(n))
def _dilate(*indices):
not_zero = []
index_tuple = []
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
for i in range(n):
if not util.equal_const_int(strides[i], 1):
index_tuple.append(idxdiv(indices[i], strides[i]))
not_zero.append(idxmod(indices[i], strides[i]).equal(0))
else:
index_tuple.append(indices[i])
if not_zero:
not_zero = tvm.tir.all(*not_zero)
return tvm.tir.if_then_else(
not_zero, data(*index_tuple), tvm.tir.const(dilation_value, data.dtype)
)
return data(*index_tuple)
return te.compute(out_shape, _dilate, name=name)
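# A minimal usage sketch, assuming an LLVM-enabled TVM build; the names "A"
# and "B" and the shapes below are illustrative only.
if __name__ == "__main__":
    A = te.placeholder((2, 2), name="A")
    B = dilate(A, [2, 2])  # (n - 1) * stride + 1 per axis -> shape (3, 3)
    s = te.create_schedule(B.op)
    f = tvm.build(s, [A, B], "llvm")
    print(f)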
|
apache-2.0
| 205,081,130,815,146,940
| 34.111111
| 99
| 0.645174
| false
| 3.750742
| false
| false
| false
|
googleapis/python-talent
|
google/cloud/talent_v4beta1/services/tenant_service/transports/base.py
|
1
|
9841
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.talent_v4beta1.types import tenant
from google.cloud.talent_v4beta1.types import tenant as gct_tenant
from google.cloud.talent_v4beta1.types import tenant_service
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class TenantServiceTransport(abc.ABC):
"""Abstract transport class for TenantService."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
)
DEFAULT_HOST: str = "jobs.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_tenant: gapic_v1.method.wrap_method(
self.create_tenant, default_timeout=30.0, client_info=client_info,
),
self.get_tenant: gapic_v1.method.wrap_method(
self.get_tenant,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.update_tenant: gapic_v1.method.wrap_method(
self.update_tenant, default_timeout=30.0, client_info=client_info,
),
self.delete_tenant: gapic_v1.method.wrap_method(
self.delete_tenant,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_tenants: gapic_v1.method.wrap_method(
self.list_tenants,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
}
@property
def create_tenant(
self,
) -> Callable[
[tenant_service.CreateTenantRequest],
Union[gct_tenant.Tenant, Awaitable[gct_tenant.Tenant]],
]:
raise NotImplementedError()
@property
def get_tenant(
self,
) -> Callable[
[tenant_service.GetTenantRequest],
Union[tenant.Tenant, Awaitable[tenant.Tenant]],
]:
raise NotImplementedError()
@property
def update_tenant(
self,
) -> Callable[
[tenant_service.UpdateTenantRequest],
Union[gct_tenant.Tenant, Awaitable[gct_tenant.Tenant]],
]:
raise NotImplementedError()
@property
def delete_tenant(
self,
) -> Callable[
[tenant_service.DeleteTenantRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_tenants(
self,
) -> Callable[
[tenant_service.ListTenantsRequest],
Union[
tenant_service.ListTenantsResponse,
Awaitable[tenant_service.ListTenantsResponse],
],
]:
raise NotImplementedError()
__all__ = ("TenantServiceTransport",)
|
apache-2.0
| 7,331,914,839,858,532,000
| 36.418251
| 103
| 0.603292
| false
| 4.356352
| false
| false
| false
|
enrimatta/RU_Python_IV
|
labs/lab_objects.py
|
1
|
1675
|
#!/usr/bin/env python
# *-* coding:utf-8 *-*
"""
:mod:`lab_objects` -- Objects in Python
=========================================
LAB Objects Learning Objective: Explore objects in Python and how everything in Python
is an object.
a. Fill in the series of functions below that determine the characteristics of an object.
b. Write a print_object_flags function that uses the is_* functions to find the characteristics
of the passed in object and print the characteristics (flags).
"""
def is_callable(obj):
""" returns True if the object is callable """
# __call__
return hasattr(obj, "__call__")
def is_with(obj):
""" returns True if the object can be used in a "with" context """
# __enter__, __exit__
return hasattr(obj, "__enter__") and hasattr(obj, "__exit__")
def is_math(obj):
""" returns True if the object supports +, -, /, and * """
# __add__, ...
return hasattr(obj, "__add__") and hasattr(obj, "__mul__") and \
hasattr(obj, "__sub__") and hasattr(obj, "__div__")
def is_iterable(obj):
""" returns True if the object is iterable """
# __iter__
return hasattr(obj, "__iter__")
def print_object_flags(obj):
""" assess the object for various characteristics and print them """
    if is_callable(obj):
        print "CALLABLE"
    if is_with(obj):
        print "WITH"
    if is_math(obj):
        print "MATH"
    if is_iterable(obj):
        print "ITERABLE"
if __name__ == "__main__":
print_object_flags(1)
print_object_flags("abc")
print_object_flags(print_object_flags)
print_object_flags([1, 2, 3])
print_object_flags(file)
|
gpl-2.0
| 3,440,704,479,022,479,000
| 26.016129
| 95
| 0.588657
| false
| 3.657205
| false
| false
| false
|
nesaro/driza
|
pyrqt/carga/operaciones/anova.py
|
1
|
3082
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) 2006-2008 Inmaculada Luengo Merino, Néstor Arocha Rodríguez
#This file is part of pyrqt.
#
#pyrqt is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#pyrqt is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with pyrqt; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Anova"""
#TODO: move to the new format
nombre = u"Anova Simple"
#tipo = "Variable"
tipo = "Casos" #FIXME: Tipo incorrecto
etiquetas = ["Otros"]
factor = {"nombre":"Factor", "tipo":"Factores"}
widget = {"tipo":"Variable", "opciones":[factor]}
def funcion(dato, variable, caso, opciones):
"""Funcion que convierte los datos de entrada en los resultados"""
import rpy #pylint: disable=import-error
diccionario = {}
r_data = {"Variable":[], "Factor":[]}
for x in dato.query(variable, caso = caso):
r_data["Variable"].append(float(x))
for x in dato.query(opciones["Factor"], caso = caso):
r_data["Factor"].append(repr(x))
# lista=[float(x) for x in dato.getCol(variable,caso=caso)]
# agrupacion=[x for x in dato.getCasos(opciones["Factor"])]
# agrupacion2=[x for x in dato.getCol(opciones["Factor"],caso=caso)]
# mifuncion=lambda f:agrupacion.index(f)
# agrupacionfinal=map(mifuncion,agrupacion2)
r_data_table = rpy.with_mode(rpy.NO_CONVERSION, rpy.r.data_frame)(r_data)
modelo = rpy.r("Variable ~ Factor")
aov = rpy.with_mode(rpy.NO_CONVERSION, rpy.r.aov)(modelo, r_data_table)
diccionario = rpy.r.summary(aov)
return diccionario
def initresultado(resultado, opciones):
"""Inicializa al objeto resultado, añadiendole lo que crea conveniente"""
resultado.addTablaSimple("resultado")
resultado["resultado"].titulo = u"Anova"
lista = []
if opciones["caso"]:
lista.append("Caso")
lista += [u"Resultado en bruto"]
resultado["resultado"].settitulo(lista)
def interfazresultado(resultado, listaopciones, floatrender = None):
"""Este método dice como introducir los datos en la tabla"""
lista = []
variable = listaopciones[0]
caso = listaopciones[1]
if caso:
lista.append(caso)
diccionario = listaopciones[2]
resultado["resultado"].set(variable, [str(diccionario)])
def comprobarentrada(opciones):
if not opciones["Factor"]:
from pyrqt.excepciones import OpcionesIncorrectaException
raise OpcionesIncorrectaException
def funcionprincipal(): pass
def funcionchequeocondiciones(interfazdato): return False
def funcionchequeoentradausuario(opciones): return False
definicionresultado = []
|
gpl-2.0
| 3,244,386,289,570,607,000
| 35.642857
| 77
| 0.707602
| false
| 2.973913
| false
| false
| false
|
blockstack/blockstack-server
|
integration_tests/blockstack_integration_tests/scenarios/namespace_preorder_reveal_import_multi_ready.py
|
1
|
3316
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
import testlib
import json
import virtualchain
wallets = [
testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 )
]
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
def scenario( wallets, **kw ):
testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey )
testlib.next_block( **kw )
resp = testlib.blockstack_name_import( "foo.test", wallets[2].addr, "11" * 20, wallets[1].privkey )
if 'error' in resp:
print json.dumps(resp, indent=4 )
testlib.next_block( **kw )
resp = testlib.blockstack_name_import( "foo.test", wallets[3].addr, "22" * 20, wallets[1].privkey )
if 'error' in resp:
print json.dumps(resp, indent=4 )
testlib.next_block( **kw )
resp = testlib.blockstack_name_import( "foo.test", wallets[4].addr, "44" * 20, wallets[1].privkey )
resp = testlib.blockstack_name_import( "foo.test", wallets[4].addr, "55" * 20, wallets[1].privkey )
resp = testlib.blockstack_name_import( "foo.test", wallets[4].addr, "33" * 20, wallets[1].privkey )
if 'error' in resp:
print json.dumps(resp, indent=4 )
testlib.next_block( **kw )
testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
testlib.next_block( **kw )
def check( state_engine ):
# not revealed, but ready
ns = state_engine.get_namespace_reveal( "test" )
if ns is not None:
return False
ns = state_engine.get_namespace( "test" )
if ns is None:
return False
if ns['namespace_id'] != 'test':
return False
# each name must exist
foo = state_engine.get_name( "foo.test" )
if foo is None:
return False
if foo['value_hash'] != "33" * 20:
return False
if foo['address'] != wallets[4].addr or foo['sender'] != virtualchain.make_payment_script(wallets[4].addr):
return False
return True
|
gpl-3.0
| -1,843,037,481,361,064,400
| 33.541667
| 144
| 0.676116
| false
| 2.870996
| true
| false
| false
|
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert/enkf/plot_data/pca_plot_vector.py
|
1
|
2288
|
from ert.cwrap import BaseCClass, CWrapper
from ert.enkf import ENKF_LIB
from ert.util import Matrix
class PcaPlotVector(BaseCClass):
def __init__(self, component, principal_component_matrix, observation_principal_component_matrix):
assert isinstance(component, int)
assert isinstance(principal_component_matrix, Matrix)
assert isinstance(observation_principal_component_matrix, Matrix)
c_pointer = PcaPlotVector.cNamespace().alloc(component, principal_component_matrix, observation_principal_component_matrix)
super(PcaPlotVector, self).__init__(c_pointer)
def __len__(self):
""" @rtype: int """
return PcaPlotVector.cNamespace().size(self)
def __getitem__(self, index):
"""
@type index: int
@rtype: float
"""
assert isinstance(index, int)
return PcaPlotVector.cNamespace().get(self, index)
def __iter__(self):
cur = 0
while cur < len(self):
yield self[cur]
cur += 1
def getObservation(self):
""" @rtype: float """
return PcaPlotVector.cNamespace().get_obs(self)
def getSingularValue(self):
""" @rtype: float """
return PcaPlotVector.cNamespace().get_singular_value(self)
def free(self):
PcaPlotVector.cNamespace().free(self)
cwrapper = CWrapper(ENKF_LIB)
cwrapper.registerType("pca_plot_vector", PcaPlotVector)
cwrapper.registerType("pca_plot_vector_obj", PcaPlotVector.createPythonObject)
cwrapper.registerType("pca_plot_vector_ref", PcaPlotVector.createCReference)
PcaPlotVector.cNamespace().alloc = cwrapper.prototype("c_void_p pca_plot_vector_alloc(int, matrix, matrix)")
PcaPlotVector.cNamespace().free = cwrapper.prototype("void pca_plot_vector_free(pca_plot_vector)")
PcaPlotVector.cNamespace().size = cwrapper.prototype("int pca_plot_vector_get_size(pca_plot_vector)")
PcaPlotVector.cNamespace().get = cwrapper.prototype("double pca_plot_vector_iget_sim_value(pca_plot_vector, int)")
PcaPlotVector.cNamespace().get_obs = cwrapper.prototype("double pca_plot_vector_get_obs_value(pca_plot_vector)")
PcaPlotVector.cNamespace().get_singular_value = cwrapper.prototype("double pca_plot_vector_get_singular_value(pca_plot_vector)")
|
gpl-3.0
| -3,688,779,641,078,196,700
| 35.903226
| 131
| 0.693182
| false
| 3.591837
| false
| false
| false
|
gauravbose/digital-menu
|
digimenu2/restaurant/migrations/0030_kitchen_usertable.py
|
1
|
1126
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('restaurant', '0029_cart'),
]
operations = [
migrations.CreateModel(
name='Kitchen',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('table', models.IntegerField()),
('status', models.CharField(default=b'RC', max_length=2, choices=[(b'RC', b'recieved'), (b'PG', b'preparing'), (b'PD', b'prepared'), (b'DD', b'delivered')])),
('menu_item', models.ForeignKey(to='restaurant.Menu')),
],
),
migrations.CreateModel(
name='Usertable',
fields=[
('table_no', models.IntegerField(serialize=False, primary_key=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
bsd-3-clause
| 7,702,231,826,981,313,000
| 34.1875
| 174
| 0.563055
| false
| 4.139706
| false
| false
| false
|
hammerhorn/working
|
cjh/letter.py
|
1
|
3270
|
#coding=utf8
import time
from cjh.music import Pitch, Note
from cjh.misc import speak
"""
translate the Roman alphabet into, e.g.,
radiophonic words, morse code, braille, etc....
"""
class Letter(object):
"""
convert between different forms of Roman-alphabet letters
"""
morse_dict = {
'1':'.----',
'2':'..---',
'3':'...--',
'4':'....-',
'5':'.....',
'6':'-....',
'7':'--...',
'8':'---..',
'9':'----.',
'0':'-----',
'A':'.-',
'B':'-...',
'C':'-.-.',
'D':'-..',
'E':'.',
'F':'..-.',
'G':'--.',
'H':'....',
'I':'..',
'J':'.---',
'K':'-.-',
'L':'.-..',
'M':'--',
'N':'-.',
'O':'---',
'P':'.--.',
'Q':'--.-',
'R':'.-.',
'S':'...',
'T':'-',
'U':'..-',
'V':'...-',
'W':'.--',
'X':'-..-',
'Y':'-.--',
'Z':'--..',
' ':'/', '.':'.-.-.-'}
radio_dict = {
'A':'Alfa',
'B':'Bravo',
'C':'Charlie',
'D':'Delta',
'E':'Echo',
'F':'Foxtrot',
'G':'Golf',
'H':'Hotel',
'I':'India',
'J':'Juliett',
'K':'Kilo',
'L':'Lima',
'M':'Mike',
'N':'November',
'O':'Oscar',
'P':'Papa',
'Q':'Quebec',
'R':'Romeo',
'S':'Sierra',
'T':'Tango',
'U':'Uniform',
'V':'Victor',
'W':'Whiskey',
'X':'Xray',
'Y':'Yankee',
'Z':'Zulu', ' ': '', '.': 'stop'}
braille_dict = {
'A':'⠁',
'B':'⠃',
'C':'⠉',
'D':'⠙',
'E':'⠑',
'F':'⠋',
'G':'⠛',
'H':'⠓',
'I':'⠊',
'J':'⠚',
'K':'⠅',
'L':'⠇',
'M':'⠍',
'N':'⠝',
'O':'⠕',
'P':'⠏',
'Q':'⠟',
'R':'⠗',
'S':'⠎',
'T':'⠞',
'U':'⠥',
'V':'⠧',
'W':'⠺',
'X':'⠭',
'Y':'⠽',
'Z':'⠵', ' ':None, '.':None}
def __init__(self, char):
self.majuscule = char.upper()
self.radio_name = self.__class__.radio_dict[char.upper()]
self.braille = self.__class__.braille_dict[char.upper()]
self.morse = self.__class__.morse_dict[char.upper()]
self.mora = 0.048
self.wpm = 1.2 / self.mora
self.hz = 1000
def __str__(self):
return '{} {} {}'.format(self.radio_name, self.braille, self.morse)
def play_morse(self):
for x in self.morse:
if x == '.':
Note(Pitch(freq=self.hz), self.mora).play()
#time.sleep(.025)
elif x == '-':
Note(Pitch(freq=self.hz), self.mora * 3).play()
elif x == ' ':
time.sleep(7 * self.mora)
time.sleep(self.mora)
time.sleep(3 * self.mora)
def radio_speak(self):
spoken_forms = {
'J': 'Julie-et',
'O': 'Oska',
'P': 'Pawpaw',
'Q': 'Kebec'
}
speak(spoken_forms.get(self.majuscule, self.radio_name))
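# A minimal usage sketch; audio output (play_morse / radio_speak) needs the
# cjh sound backends, so only the text forms are shown here.
if __name__ == '__main__':
    k = Letter('k')
    print(k)        # radiophonic name, braille cell, and morse code
    print(k.morse)  # -.-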
|
gpl-3.0
| 8,146,375,046,475,646,000
| 21.661972
| 75
| 0.306712
| false
| 2.955005
| false
| false
| false
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/work_summary_v30_rc1.py
|
1
|
13920
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30_rc1 import CreatedDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30_rc1 import ExternalIDsV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc1 import LastModifiedDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.publication_date_v30_rc1 import PublicationDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.source_v30_rc1 import SourceV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc1 import TitleV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc1 import UrlV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.work_title_v30_rc1 import WorkTitleV30Rc1 # noqa: F401,E501
class WorkSummaryV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'put_code': 'int',
'created_date': 'CreatedDateV30Rc1',
'last_modified_date': 'LastModifiedDateV30Rc1',
'source': 'SourceV30Rc1',
'title': 'WorkTitleV30Rc1',
'external_ids': 'ExternalIDsV30Rc1',
'url': 'UrlV30Rc1',
'type': 'str',
'publication_date': 'PublicationDateV30Rc1',
'journal_title': 'TitleV30Rc1',
'visibility': 'str',
'path': 'str',
'display_index': 'str'
}
attribute_map = {
'put_code': 'put-code',
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'title': 'title',
'external_ids': 'external-ids',
'url': 'url',
'type': 'type',
'publication_date': 'publication-date',
'journal_title': 'journal-title',
'visibility': 'visibility',
'path': 'path',
'display_index': 'display-index'
}
def __init__(self, put_code=None, created_date=None, last_modified_date=None, source=None, title=None, external_ids=None, url=None, type=None, publication_date=None, journal_title=None, visibility=None, path=None, display_index=None): # noqa: E501
"""WorkSummaryV30Rc1 - a model defined in Swagger""" # noqa: E501
self._put_code = None
self._created_date = None
self._last_modified_date = None
self._source = None
self._title = None
self._external_ids = None
self._url = None
self._type = None
self._publication_date = None
self._journal_title = None
self._visibility = None
self._path = None
self._display_index = None
self.discriminator = None
if put_code is not None:
self.put_code = put_code
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if title is not None:
self.title = title
if external_ids is not None:
self.external_ids = external_ids
if url is not None:
self.url = url
if type is not None:
self.type = type
if publication_date is not None:
self.publication_date = publication_date
if journal_title is not None:
self.journal_title = journal_title
if visibility is not None:
self.visibility = visibility
if path is not None:
self.path = path
if display_index is not None:
self.display_index = display_index
@property
def put_code(self):
"""Gets the put_code of this WorkSummaryV30Rc1. # noqa: E501
:return: The put_code of this WorkSummaryV30Rc1. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this WorkSummaryV30Rc1.
:param put_code: The put_code of this WorkSummaryV30Rc1. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def created_date(self):
"""Gets the created_date of this WorkSummaryV30Rc1. # noqa: E501
:return: The created_date of this WorkSummaryV30Rc1. # noqa: E501
:rtype: CreatedDateV30Rc1
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this WorkSummaryV30Rc1.
:param created_date: The created_date of this WorkSummaryV30Rc1. # noqa: E501
:type: CreatedDateV30Rc1
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this WorkSummaryV30Rc1. # noqa: E501
:return: The last_modified_date of this WorkSummaryV30Rc1. # noqa: E501
:rtype: LastModifiedDateV30Rc1
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this WorkSummaryV30Rc1.
:param last_modified_date: The last_modified_date of this WorkSummaryV30Rc1. # noqa: E501
:type: LastModifiedDateV30Rc1
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this WorkSummaryV30Rc1. # noqa: E501
:return: The source of this WorkSummaryV30Rc1. # noqa: E501
:rtype: SourceV30Rc1
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this WorkSummaryV30Rc1.
:param source: The source of this WorkSummaryV30Rc1. # noqa: E501
:type: SourceV30Rc1
"""
self._source = source
@property
def title(self):
"""Gets the title of this WorkSummaryV30Rc1. # noqa: E501
:return: The title of this WorkSummaryV30Rc1. # noqa: E501
:rtype: WorkTitleV30Rc1
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkSummaryV30Rc1.
:param title: The title of this WorkSummaryV30Rc1. # noqa: E501
:type: WorkTitleV30Rc1
"""
self._title = title
@property
def external_ids(self):
"""Gets the external_ids of this WorkSummaryV30Rc1. # noqa: E501
:return: The external_ids of this WorkSummaryV30Rc1. # noqa: E501
:rtype: ExternalIDsV30Rc1
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this WorkSummaryV30Rc1.
:param external_ids: The external_ids of this WorkSummaryV30Rc1. # noqa: E501
:type: ExternalIDsV30Rc1
"""
self._external_ids = external_ids
@property
def url(self):
"""Gets the url of this WorkSummaryV30Rc1. # noqa: E501
:return: The url of this WorkSummaryV30Rc1. # noqa: E501
:rtype: UrlV30Rc1
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this WorkSummaryV30Rc1.
:param url: The url of this WorkSummaryV30Rc1. # noqa: E501
:type: UrlV30Rc1
"""
self._url = url
@property
def type(self):
"""Gets the type of this WorkSummaryV30Rc1. # noqa: E501
:return: The type of this WorkSummaryV30Rc1. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this WorkSummaryV30Rc1.
:param type: The type of this WorkSummaryV30Rc1. # noqa: E501
:type: str
"""
allowed_values = ["ARTISTIC_PERFORMANCE", "BOOK_CHAPTER", "BOOK_REVIEW", "BOOK", "CONFERENCE_ABSTRACT", "CONFERENCE_PAPER", "CONFERENCE_POSTER", "DATA_SET", "DICTIONARY_ENTRY", "DISCLOSURE", "DISSERTATION", "EDITED_BOOK", "ENCYCLOPEDIA_ENTRY", "INVENTION", "JOURNAL_ARTICLE", "JOURNAL_ISSUE", "LECTURE_SPEECH", "LICENSE", "MAGAZINE_ARTICLE", "MANUAL", "NEWSLETTER_ARTICLE", "NEWSPAPER_ARTICLE", "ONLINE_RESOURCE", "OTHER", "PATENT", "PREPRINT", "REGISTERED_COPYRIGHT", "REPORT", "RESEARCH_TECHNIQUE", "RESEARCH_TOOL", "SOFTWARE", "SPIN_OFF_COMPANY", "STANDARDS_AND_POLICY", "SUPERVISED_STUDENT_PUBLICATION", "TECHNICAL_STANDARD", "TEST", "TRADEMARK", "TRANSLATION", "WEBSITE", "WORKING_PAPER", "UNDEFINED"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def publication_date(self):
"""Gets the publication_date of this WorkSummaryV30Rc1. # noqa: E501
:return: The publication_date of this WorkSummaryV30Rc1. # noqa: E501
:rtype: PublicationDateV30Rc1
"""
return self._publication_date
@publication_date.setter
def publication_date(self, publication_date):
"""Sets the publication_date of this WorkSummaryV30Rc1.
:param publication_date: The publication_date of this WorkSummaryV30Rc1. # noqa: E501
:type: PublicationDateV30Rc1
"""
self._publication_date = publication_date
@property
def journal_title(self):
"""Gets the journal_title of this WorkSummaryV30Rc1. # noqa: E501
:return: The journal_title of this WorkSummaryV30Rc1. # noqa: E501
:rtype: TitleV30Rc1
"""
return self._journal_title
@journal_title.setter
def journal_title(self, journal_title):
"""Sets the journal_title of this WorkSummaryV30Rc1.
:param journal_title: The journal_title of this WorkSummaryV30Rc1. # noqa: E501
:type: TitleV30Rc1
"""
self._journal_title = journal_title
@property
def visibility(self):
"""Gets the visibility of this WorkSummaryV30Rc1. # noqa: E501
:return: The visibility of this WorkSummaryV30Rc1. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this WorkSummaryV30Rc1.
:param visibility: The visibility of this WorkSummaryV30Rc1. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
@property
def path(self):
"""Gets the path of this WorkSummaryV30Rc1. # noqa: E501
:return: The path of this WorkSummaryV30Rc1. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this WorkSummaryV30Rc1.
:param path: The path of this WorkSummaryV30Rc1. # noqa: E501
:type: str
"""
self._path = path
@property
def display_index(self):
"""Gets the display_index of this WorkSummaryV30Rc1. # noqa: E501
:return: The display_index of this WorkSummaryV30Rc1. # noqa: E501
:rtype: str
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""Sets the display_index of this WorkSummaryV30Rc1.
:param display_index: The display_index of this WorkSummaryV30Rc1. # noqa: E501
:type: str
"""
self._display_index = display_index
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkSummaryV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkSummaryV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
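# A minimal usage sketch: of the fields above, only `type` and `visibility`
# are validated against fixed vocabularies; the values below are arbitrary
# but allowed.
if __name__ == "__main__":
    summary = WorkSummaryV30Rc1(
        put_code=123, type="JOURNAL_ARTICLE", visibility="PUBLIC", display_index="0"
    )
    print(summary.to_dict())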
|
mit
| 3,248,907,714,498,165,000
| 30.422122
| 728
| 0.599282
| false
| 3.651626
| false
| false
| false
|
catapult-project/catapult
|
telemetry/telemetry/internal/image_processing/_bitmap.py
|
3
|
8267
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Bitmap is a basic wrapper for image pixels. It includes some basic processing
tools: crop, find bounding box of a color and compute histogram of color values.
"""
from __future__ import absolute_import
import array
try:
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import struct
import subprocess
import warnings
from telemetry.internal.util import binary_manager
from telemetry.core import platform
from telemetry.util import color_histogram
from telemetry.util import rgba_color
import png
class _BitmapTools(object):
"""Wraps a child process of bitmaptools and allows for one command."""
CROP_PIXELS = 0
HISTOGRAM = 1
BOUNDING_BOX = 2
def __init__(self, dimensions, pixels):
binary = binary_manager.FetchPath(
'bitmaptools',
platform.GetHostPlatform().GetOSName(),
platform.GetHostPlatform().GetArchName())
assert binary, 'You must build bitmaptools first!'
self._popen = subprocess.Popen([binary],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# dimensions are: bpp, width, height, boxleft, boxtop, boxwidth, boxheight
packed_dims = struct.pack('iiiiiii', *dimensions)
self._popen.stdin.write(packed_dims)
# If we got a list of ints, we need to convert it into a byte buffer.
if not isinstance(pixels, bytearray):
pixels = bytearray(pixels)
self._popen.stdin.write(pixels)
def _RunCommand(self, *command):
assert not self._popen.stdin.closed, (
'Exactly one command allowed per instance of tools.')
packed_command = struct.pack('i' * len(command), *command)
self._popen.stdin.write(packed_command)
self._popen.stdin.close()
length_packed = self._popen.stdout.read(struct.calcsize('i'))
if not length_packed:
raise Exception(self._popen.stderr.read())
length = struct.unpack('i', length_packed)[0]
return self._popen.stdout.read(length)
def CropPixels(self):
return self._RunCommand(_BitmapTools.CROP_PIXELS)
def Histogram(self, ignore_color, tolerance):
ignore_color_int = -1 if ignore_color is None else int(ignore_color)
response = self._RunCommand(_BitmapTools.HISTOGRAM,
ignore_color_int, tolerance)
out = array.array('i')
out.fromstring(response)
assert len(out) == 768, (
'The ColorHistogram has the wrong number of buckets: %s' % len(out))
return color_histogram.ColorHistogram(
out[:256], out[256:512], out[512:], ignore_color)
def BoundingBox(self, color, tolerance):
response = self._RunCommand(_BitmapTools.BOUNDING_BOX, int(color),
tolerance)
unpacked = struct.unpack('iiiii', response)
box, count = unpacked[:4], unpacked[-1]
if box[2] < 0 or box[3] < 0:
box = None
return box, count
class Bitmap(object):
"""Utilities for parsing and inspecting a bitmap."""
def __init__(self, bpp, width, height, pixels, metadata=None):
assert bpp in [3, 4], 'Invalid bytes per pixel'
assert width > 0, 'Invalid width'
assert height > 0, 'Invalid height'
assert pixels, 'Must specify pixels'
assert bpp * width * height == len(pixels), 'Dimensions and pixels mismatch'
self._bpp = bpp
self._width = width
self._height = height
self._pixels = pixels
self._metadata = metadata or {}
self._crop_box = None
@property
def bpp(self):
return self._bpp
@property
def width(self):
return self._crop_box[2] if self._crop_box else self._width
@property
def height(self):
return self._crop_box[3] if self._crop_box else self._height
def _PrepareTools(self):
"""Prepares an instance of _BitmapTools which allows exactly one command.
"""
crop_box = self._crop_box or (0, 0, self._width, self._height)
return _BitmapTools((self._bpp, self._width, self._height) + crop_box,
self._pixels)
@property
def pixels(self):
if self._crop_box:
self._pixels = self._PrepareTools().CropPixels()
# pylint: disable=unpacking-non-sequence
_, _, self._width, self._height = self._crop_box
self._crop_box = None
if not isinstance(self._pixels, bytearray):
self._pixels = bytearray(self._pixels)
return self._pixels
@property
def metadata(self):
self._metadata['size'] = (self.width, self.height)
self._metadata['alpha'] = self.bpp == 4
self._metadata['bitdepth'] = 8
return self._metadata
def GetPixelColor(self, x, y):
pixels = self.pixels
base = self._bpp * (y * self._width + x)
if self._bpp == 4:
return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
pixels[base + 2], pixels[base + 3])
return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
pixels[base + 2])
@staticmethod
def FromPng(png_data):
warnings.warn(
'Using pure python png decoder, which could be very slow. To speed up, '
'consider installing numpy & cv2 (OpenCV).')
width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
return Bitmap(4 if meta['alpha'] else 3, width, height, pixels, meta)
@staticmethod
def FromPngFile(path):
with open(path, "rb") as f:
return Bitmap.FromPng(f.read())
def WritePngFile(self, path):
with open(path, "wb") as f:
png.Writer(**self.metadata).write_array(f, self.pixels)
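  # Illustrative round trip (file names are hypothetical):
  #   bmp = Bitmap.FromPngFile('screenshot.png')
  #   bmp.Crop(0, 0, 100, 100).WritePngFile('cropped.png')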
def IsEqual(self, other, tolerance=0):
# Dimensions must be equal
if self.width != other.width or self.height != other.height:
return False
# Loop over each pixel and test for equality
if tolerance or self.bpp != other.bpp:
for y in range(self.height):
for x in range(self.width):
c0 = self.GetPixelColor(x, y)
c1 = other.GetPixelColor(x, y)
if not c0.IsEqual(c1, tolerance):
return False
else:
return self.pixels == other.pixels
return True
def Diff(self, other):
# Output dimensions will be the maximum of the two input dimensions
out_width = max(self.width, other.width)
out_height = max(self.height, other.height)
diff = [[0 for x in range(out_width * 3)] for x in range(out_height)]
# Loop over each pixel and write out the difference
for y in range(out_height):
for x in range(out_width):
if x < self.width and y < self.height:
c0 = self.GetPixelColor(x, y)
else:
c0 = rgba_color.RgbaColor(0, 0, 0, 0)
if x < other.width and y < other.height:
c1 = other.GetPixelColor(x, y)
else:
c1 = rgba_color.RgbaColor(0, 0, 0, 0)
offset = x * 3
diff[y][offset] = abs(c0.r - c1.r)
diff[y][offset+1] = abs(c0.g - c1.g)
diff[y][offset+2] = abs(c0.b - c1.b)
# This particular method can only save to a file, so the result will be
# written into an in-memory buffer and read back into a Bitmap
warnings.warn(
'Using pure python png decoder, which could be very slow. To speed up, '
'consider installing numpy & cv2 (OpenCV).')
diff_img = png.from_array(diff, mode='RGB')
output = StringIO()
try:
diff_img.save(output)
diff = Bitmap.FromPng(output.getvalue())
finally:
output.close()
return diff
def GetBoundingBox(self, color, tolerance=0):
return self._PrepareTools().BoundingBox(color, tolerance)
def Crop(self, left, top, width, height):
cur_box = self._crop_box or (0, 0, self._width, self._height)
cur_left, cur_top, cur_width, cur_height = cur_box
if (left < 0 or top < 0 or
(left + width) > cur_width or
(top + height) > cur_height):
raise ValueError('Invalid dimensions')
self._crop_box = cur_left + left, cur_top + top, width, height
return self
def ColorHistogram(self, ignore_color=None, tolerance=0):
return self._PrepareTools().Histogram(ignore_color, tolerance)
|
bsd-3-clause
| 5,840,911,145,013,504,000
| 32.605691
| 80
| 0.637958
| false
| 3.656347
| false
| false
| false
|
mdshw5/strandex
|
setup.py
|
1
|
1338
|
from setuptools import setup
install_requires = ['six']
def get_version(string):
""" Parse the version number variable __version__ from a script. """
import re
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
version_str = re.search(version_re, string, re.M).group(1)
return version_str
setup(
name='strandex',
version=get_version(open('strandex/__init__.py').read()),
author='Matthew Shirley',
author_email='mdshw5@gmail.com',
url='https://github.com/mdshw5/strandex',
description='Strand-anchored regex for expansion or contraction of FASTQ files',
packages=['strandex'],
install_requires=install_requires,
entry_points = { 'console_scripts': [ 'strandex = strandex:main' ] },
license='MIT',
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
|
mit
| 6,688,157,721,223,414,000
| 37.228571
| 84
| 0.571749
| false
| 3.970326
| false
| false
| false
|
project-schumann/vmf-converter
|
tests/vmf_converter_test.py
|
1
|
39224
|
import unittest
import json
from music21 import converter
from music21 import duration
from music21.chord import Chord
from music21.meter import TimeSignature
from music21.note import Note, Rest
from music21.key import KeySignature
from music21.tempo import MetronomeMark
from vmf_converter.core import vmf_converter_core
class vmfConverterTest(unittest.TestCase):
"""Test Class for vmf_converter_core module"""
def test_convert_score_to_vmf_001(self):
"""
        Tests the conversion of a score stream to a vmf data structure.
"""
score = converter.parse('./fixtures/simple.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/simple.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_002(self):
"""
Tests the conversion of a score stream with ties to a vmf data structure.
"""
score = converter.parse('./fixtures/ties.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/ties.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_003(self):
"""
Tests the conversion of a score stream with triplets to a vmf data structure.
"""
score = converter.parse('./fixtures/triplets.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/triplets.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_004(self):
"""
Tests the conversion of a score stream with duplets to a vmf data structure.
"""
score = converter.parse('./fixtures/duplets.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/duplets.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_005(self):
"""
Tests the conversion of a score stream with quintuplets to a vmf data structure.
"""
score = converter.parse('./fixtures/quintuplets.xml')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/quintuplets.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_006(self):
"""
Tests the conversion of a score stream with a simple to simple meter change to a vmf data structure.
"""
score = converter.parse('./fixtures/SimpleToSimple.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/SimpleToSimple.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
print(actual)
assert expected == actual
def test_convert_score_to_vmf_007(self):
"""
Tests the conversion of a score stream with a compound to compound meter change to a vmf data structure.
"""
score = converter.parse('./fixtures/CompoundToCompound.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/CompoundToCompound.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
print(actual)
assert expected == actual
def test_convert_score_to_vmf_008(self):
"""
Tests the conversion of a score stream with a simple to compound meter change to a vmf data structure.
"""
score = converter.parse('./fixtures/SimpleToCompound.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/SimpleToCompound.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_009(self):
"""
Tests the conversion of a score stream with a compound to simple meter change to a vmf data structure.
"""
score = converter.parse('./fixtures/CompoundToSimple.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/CompoundToSimple.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_010(self):
"""
Tests the conversion of a score stream with chords to a vmf data structure.
"""
score = converter.parse('./fixtures/chords.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/chords.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_011(self):
"""
Tests the conversion of a score stream with multiple voices to a vmf data structure.
"""
score = converter.parse('./fixtures/voices.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/voices.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_012(self):
"""
Tests the conversion of a score stream with dynamics to a vmf data structure.
"""
score = converter.parse('./fixtures/dynamics.mid')
first_phrase = score.measures(0, 5)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/dynamics.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_013(self):
"""
        Tests the conversion of a score stream with dotted rhythms to a vmf data structure.
"""
score = converter.parse('./fixtures/dottedQuarter.mid')
first_phrase = score.measures(0, 2)
actual = vmf_converter_core.convert_score_to_vmf(first_phrase)
with open('./expected/dottedQuarter.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_014(self):
"""
Tests a key signature change.
"""
score = converter.parse('./fixtures/keyChange.mid')
actual = vmf_converter_core.convert_score_to_vmf(score)
with open('./expected/keyChange.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_015(self):
"""
Tests a tempo change.
"""
score = converter.parse('./fixtures/tempoChange.mid')
actual = vmf_converter_core.convert_score_to_vmf(score)
with open('./expected/tempoChange.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_016(self):
"""
Tests an explicit anacrusis.
"""
score = converter.parse('./fixtures/anacrusis2.xml')
actual = vmf_converter_core.convert_score_to_vmf(score)
with open('./expected/anacrusis.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_017(self):
"""
        Tests the conversion of a score stream with chords and sustained notes.
"""
score = converter.parse('./fixtures/chordsAndSustain.xml')
actual = vmf_converter_core.convert_score_to_vmf(score)
with open('./expected/chordsAndSustain.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_convert_score_to_vmf_018(self):
"""
        Tests the conversion of a score stream with syncopated chords.
"""
score = converter.parse('./fixtures/syncopated.xml')
actual = vmf_converter_core.convert_score_to_vmf(score)
with open('./expected/syncopated.vmf', 'r') as expected_file:
expected_json = expected_file.read()
expected = json.loads(expected_json)
assert expected == actual
def test_scan_score_durations_001(self):
"""
Tests the scanning function which pre-analyzes the score to determine the
smallest necessary note value to accurately encode the score as a vmf.
"""
score = converter.parse('./fixtures/aus_meines_herz.mid')
shortest_duration = vmf_converter_core.scan_score_durations(score)
assert shortest_duration == duration.convertTypeToQuarterLength('eighth')
def test_scan_score_for_largest_chord_001(self):
"""
Tests the scanning function which pre-analyzes the score to determine the
largest chord size.
"""
score = converter.parse('./fixtures/chords.mid')
largest_chord_size = vmf_converter_core.scan_score_for_largest_chord(score)
assert largest_chord_size == 3
def test_scan_score_for_number_of_voices_001(self):
"""
Tests the scanning function which pre-analyzes the score to determine the
number of voices in each part.
"""
score = converter.parse('./fixtures/voices.mid')
first_phrase = score.measures(0, 2)
number_of_parts = vmf_converter_core.scan_score_for_number_of_voices(first_phrase)
assert number_of_parts == 3
def test_convert_vmf_to_midi_001(self):
"""
Tests the conversion of a simple vmf file to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/simple.vmf')
expected_score = converter.parse('./fixtures/simple.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_convert_vmf_to_midi_002(self):
"""
Tests the conversion of a vmf file with ties to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/ties.vmf')
expected_score = converter.parse('./fixtures/ties.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_convert_vmf_to_midi_003(self):
"""
Tests the conversion of a vmf file with rhythmic dots to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/dottedQuarter.vmf')
expected_score = converter.parse('./fixtures/dottedQuarter.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_convert_vmf_to_midi_004(self):
"""
Tests the conversion of a vmf file with triplets to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/triplets.vmf')
expected_score = converter.parse('./fixtures/triplets.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_convert_vmf_to_midi_005(self):
"""
Tests the conversion of a vmf file with duplets to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/duplets.vmf')
expected_score = converter.parse('./fixtures/duplets.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_convert_vmf_to_midi_006(self):
"""
Tests the conversion of a vmf file with a simple to simple meter change to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/SimpleToSimple.vmf')
expected_score = converter.parse('./fixtures/SimpleToSimple.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
# Check that the time signatures are encoded.
expected_time_signatures = expected_score.flat.getElementsByClass(TimeSignature)
actual_time_signatures = actual_score.flat.getElementsByClass(TimeSignature)
# Ensure we have the right number of time signatures.
assert len(expected_time_signatures) == len(actual_time_signatures)
for expected, actual in zip(expected_time_signatures, actual_time_signatures):
assert expected.ratioString == actual.ratioString
assert expected.offset == actual.offset
def test_convert_vmf_to_midi_007(self):
"""
Tests the conversion of a vmf file with a compound to compound meter change to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/CompoundToCompound.vmf')
expected_score = converter.parse('./fixtures/CompoundToCompound.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
# Check that the time signatures are encoded.
expected_time_signatures = expected_score.flat.getElementsByClass(TimeSignature)
actual_time_signatures = actual_score.flat.getElementsByClass(TimeSignature)
# Ensure we have the right number of time signatures.
assert len(expected_time_signatures) == len(actual_time_signatures)
for expected, actual in zip(expected_time_signatures, actual_time_signatures):
assert expected.ratioString == actual.ratioString
assert expected.offset == actual.offset
def test_convert_vmf_to_midi_008(self):
"""
Tests the conversion of a vmf file with a simple to compound meter change to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/SimpleToCompound.vmf')
expected_score = converter.parse('./fixtures/SimpleToCompound.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
# Check that the time signatures are encoded.
expected_time_signatures = expected_score.flat.getElementsByClass(TimeSignature)
actual_time_signatures = actual_score.flat.getElementsByClass(TimeSignature)
# Ensure we have the right number of time signatures.
assert len(expected_time_signatures) == len(actual_time_signatures)
for expected, actual in zip(expected_time_signatures, actual_time_signatures):
assert expected.ratioString == actual.ratioString
assert expected.offset == actual.offset
def test_convert_vmf_to_midi_009(self):
"""
Tests the conversion of a vmf file with a compound to simple meter change to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/CompoundToSimple.vmf')
expected_score = converter.parse('./fixtures/CompoundToSimple.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
# Check that the time signatures are encoded.
expected_time_signatures = expected_score.flat.getElementsByClass(TimeSignature)
actual_time_signatures = actual_score.flat.getElementsByClass(TimeSignature)
# Ensure we have the right number of time signatures.
assert len(expected_time_signatures) == len(actual_time_signatures)
for expected, actual in zip(expected_time_signatures, actual_time_signatures):
assert expected.ratioString == actual.ratioString
assert expected.offset == actual.offset
def test_convert_vmf_to_midi_010(self):
"""
Tests the conversion of a vmf file with chords to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/chords.vmf')
expected_score = converter.parse('./fixtures/chords.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Chord:
assert len(expected_element.pitches) == len(actual_element.pitches)
assert expected_element.quarterLength == actual_element.quarterLength
for actual_pitch, expected_pitch in zip(expected_element.pitches, actual_element.pitches):
assert expected_pitch.pitchClass == actual_pitch.pitchClass
assert expected_pitch.octave == actual_pitch.octave
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
# Check that the time signatures are encoded.
expected_time_signatures = expected_score.flat.getElementsByClass(TimeSignature)
actual_time_signatures = actual_score.flat.getElementsByClass(TimeSignature)
# Ensure we have the right number of time signatures.
assert len(expected_time_signatures) == len(actual_time_signatures)
for expected, actual in zip(expected_time_signatures, actual_time_signatures):
assert expected.ratioString == actual.ratioString
assert expected.offset == actual.offset
def test_convert_vmf_to_midi_011(self):
"""
Tests the conversion of a vmf file with multiple voices to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/voices.vmf')
expected_score = converter.parse('./fixtures/voices.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Chord:
assert len(expected_element.pitches) == len(actual_element.pitches)
assert expected_element.quarterLength == actual_element.quarterLength
for actual_pitch, expected_pitch in zip(expected_element.pitches, actual_element.pitches):
assert expected_pitch.pitchClass == actual_pitch.pitchClass
assert expected_pitch.octave == actual_pitch.octave
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_convert_vmf_to_midi_012(self):
"""
Tests the conversion of a vmf file with dynamics to a midi file.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/dynamics.vmf')
expected_score = converter.parse('./fixtures/dynamics.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements, actual.flat.notesAndRests.elements):
if type(expected_element) is Chord:
assert len(expected_element.pitches) == len(actual_element.pitches)
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.volume.velocity == actual_element.volume.velocity
for actual_pitch, expected_pitch in zip(expected_element.pitches, actual_element.pitches):
assert expected_pitch.pitchClass == actual_pitch.pitchClass
assert expected_pitch.octave == actual_pitch.octave
elif type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
assert expected_element.volume.velocity == actual_element.volume.velocity
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_convert_vmf_to_midi_013(self):
"""
Tests a key signature change.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/keyChange.vmf')
expected_score = converter.parse('./fixtures/keyChange.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements,
actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
# Check that the key signatures are encoded.
expected_key_signatures = expected_score.flat.getElementsByClass(KeySignature)
actual_key_signatures = actual_score.flat.getElementsByClass(KeySignature)
# Ensure we have the right number of key signatures.
assert len(expected_key_signatures) == len(actual_key_signatures)
for expected, actual in zip(expected_key_signatures, actual_key_signatures):
assert expected.sharps == actual.sharps
assert expected.offset == actual.offset
def test_convert_vmf_to_midi_014(self):
"""
Tests a tempo change.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/tempoChange.vmf')
expected_score = converter.parse('./fixtures/tempoChange.mid')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements,
actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
# Check that the tempos are encoded.
expected_tempos = expected_score.flat.getElementsByClass(MetronomeMark)
actual_tempos = actual_score.flat.getElementsByClass(MetronomeMark)
# Ensure we have the right number of tempos.
assert len(expected_tempos) == len(actual_tempos)
for expected, actual in zip(expected_tempos, actual_tempos):
assert expected.number == actual.number
assert expected.offset == actual.offset
def test_read_vmf_string_001(self):
"""
        Tests reading a VMF file with articulations.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/articulation.vmf')
expected_score = converter.parse('./fixtures/articulation.xml')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements,
actual.flat.notesAndRests.elements):
if type(expected_element) is Chord:
assert len(expected_element.pitches) == len(actual_element.pitches)
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.volume.velocity == actual_element.volume.velocity
for actual_pitch, expected_pitch in zip(expected_element.pitches, actual_element.pitches):
assert expected_pitch.pitchClass == actual_pitch.pitchClass
assert expected_pitch.octave == actual_pitch.octave
elif type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
# Equality on articulations is not well implemented in music21.
for a, b in zip(expected_element.articulations, actual_element.articulations):
assert type(a) == type(b)
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_read_vmf_string_002(self):
"""
Tests reading a VMF file with a pickup measure.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/anacrusis.vmf')
expected_score = converter.parse('./fixtures/anacrusis.xml')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements,
actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_read_vmf_string_003(self):
"""
Tests reading a VMF file with a pickup and a measure of rests.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/anacrusisAndRests.vmf')
expected_score = converter.parse('./fixtures/anacrusisAndRests.xml')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements,
actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_read_vmf_string_004(self):
"""
Tests reading a VMF file with a quintuplet.
"""
actual_score = vmf_converter_core.read_vmf_file('./expected/quintuplets.vmf')
expected_score = converter.parse('./fixtures/quintuplets.xml')
# Assert that the file has the right number of parts.
assert len(expected_score.parts) == len(actual_score.parts)
# Assert that the notes and rests match
for expected, actual in zip(expected_score.parts, actual_score.parts):
for expected_element, actual_element in zip(expected.flat.notesAndRests.elements,
actual.flat.notesAndRests.elements):
if type(expected_element) is Note:
assert expected_element.quarterLength == actual_element.quarterLength
assert expected_element.pitch.pitchClass == actual_element.pitch.pitchClass
assert expected_element.pitch.octave == actual_element.pitch.octave
elif type(expected_element) is Rest:
assert expected_element.quarterLength == actual_element.quarterLength
def test_find_number_of_notes_in_tick_001(self):
"""
        Tests finding the number of notes in a tick.
"""
tick = [1,-1,0,0,4,-1,-1,-1,-1,0]
number_of_notes = vmf_converter_core.find_number_of_notes_in_tick(tick)
assert number_of_notes == 1
def test_find_number_of_notes_in_tick_002(self):
"""
        Tests finding the number of notes in a tick.
"""
tick = [1,-1,0,0,4,0,0,-1,-1,0]
number_of_notes = vmf_converter_core.find_number_of_notes_in_tick(tick)
assert number_of_notes == 2
|
mit
| 8,606,255,815,061,616,000
| 47.605948
| 130
| 0.642158
| false
| 4.258387
| true
| false
| false
|
cjgibson/mechkbot
|
bot.py
|
1
|
61053
|
# -*- coding: utf-8 -*-
###
# AUTHORS: CHRISTIAN GIBSON,
# PROJECT: /r/MechMarket Bot
# UPDATED: SEPTEMBER 11, 2015
# USAGE: python bot.py [-h / --help] [-is / --interactive-shell]
# EXPECTS: python 3.4.0
# beautifulsoup4 4.4.0
# praw 3.2.1
# regex 2015.06.24
###
import argparse
import bs4
import cmd
import collections
import configparser
import copy
import errno
import inspect
import logging
import math
import multiprocessing
import os
import platform
import praw
import random
import regex
import shelve
import shutil
import threading
import time
import traceback
import urllib
import uuid
__AUTHORS__ = ['/u/NotMelNoGuitars']
__VERSION__ = 0.1
__CMD_STR__ = '>>> '
__INFO__ = 'MechKB0t-v%s on "%s" with << %s v%s >> at %s %s' % (
__VERSION__,
platform.platform(),
platform.python_implementation(),
platform.python_version(),
time.ctime(),
time.localtime().tm_zone)
def coerce_reddit_handles(handles=__AUTHORS__):
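    # Normalize author strings into canonical '/u/...' or '/r/...' handles,
    # stripping characters reddit does not allow in names.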
clean = regex.compile(r'[^A-Z0-9_/-]', regex.UNICODE + regex.IGNORECASE)
authors = []
for author in handles:
author = clean.sub('', str(author))
if ((author.startswith('/u/') or author.startswith('/r/'))
and len(author.split('/')) == 3):
authors.append(author)
else:
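            # Bare or malformed names are coerced to '/u/<longest path segment>'.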
authors.append('/u/' + max(author.split('/'), key=len))
return authors
class config_generator():
# c = bot.config_generator()(bot.bot.CONFIG_DEFAULTS)() ; print(c.func_code)
_FUNC_CODE_ = """class config_handler(configparser.RawConfigParser):
def __init__(self, conf_file=None):
super(self.__class__, self).__init__()
self.true_values = frozenset(['true', 't', '1', 'y', 'yes', 'aye', 'on',
'use', 'active', 'activate'])
self.heatware_regex = None
if conf_file:
self.conf_file = os.path.abspath(conf_file)
else:
try:
self.conf_file = (os.path.dirname(os.path.abspath(
inspect.getsourcefile(lambda: None))) + os.sep + 'config.cfg')
except:
self.conf_file = None
if self.conf_file:
try:
self.read(self.conf_file)
if not self.sections():
self.generate_defaults()
self.status = errno.ENOENT
else:
self.status = 0
except:
traceback.print_exc()
self.status = errno.EIO
else:
self.status = errno.EBADF
def store(self):
with open(self.conf_file, 'w') as conf_handle:
self.write(conf_handle)
def protected_pull(self, section, option, cast=None, default=None):
if self.status:
raise EnvironmentError(self.status,
('Current status #%d <%s> "%s".' %
(self.status,
errno.errorcode[self.status],
os.strerror(self.status))),
self.conf_file)
try:
if cast:
return cast(self.get(section, option))
else:
return self.get(section, option)
except:
if default:
return default
else:
raise
def protected_pullboolean(self, section, option):
boolean = self.protected_pull(section, option).lower()
if boolean in self.true_values:
return True
return False
def protected_push(self, section, option, value):
if self.status:
raise EnvironmentError(self.status,
('Current status #%d <%s> "%s".' %
(self.status,
errno.errorcode[self.status],
os.strerror(self.status))),
self.conf_file)
try:
self.set(section, option, value)
self.store()
return True
except:
return False
def protected_pushboolean(self, section, option, value):
if value is True or value in self.true_values:
return self.protected_push(section, option, 'true')
return self.protected_push(section, option, 'false')
"""
def __init__(self):
pass
def __call__(self, sections, ignore_description=False):
if all(all('desc' in detail for _, detail in options.items())
for _, options in sections.items()) or ignore_description:
pass
else:
raise TypeError('Provided configuration does not provide a "desc" '
'field for each section option. As such, the %s '
'cannot create an interactive_initialization() '
'method. To create the constructor without the '
'interactive_initialization() method, set '
'"ignore_description" to True when calling %s.'
% (self.__class__, self.__class__))
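        # Pre-seed every name that will already exist on the generated class so
        # that auto-generated get_/set_ methods cannot silently shadow them.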
added_methods = {attr_or_func: None
for attr_or_func in dir(configparser.RawConfigParser)}
added_methods['conf_file'] = None
added_methods['func_code'] = None
added_methods['heatware_regex'] = None
added_methods['protected_pull'] = None
added_methods['protected_pullboolean'] = None
added_methods['protected_push'] = None
added_methods['protected_pushboolean'] = None
added_methods['status'] = None
added_methods['store'] = None
added_methods['true_values'] = None
if ignore_description:
added_methods['generate_defaults'] = None
else:
added_methods['generate_defaults'] = None
added_methods['interactive_initialization'] = None
init_initials = [" def interactive_initialization(self):",
" to_initialize = ["]
init_defaults = [" def generate_defaults(self):"]
for section, options in sections.items():
init_defaults.append(" self.add_section('%s')" % section)
for option, detail in options.items():
if 'boolean' in detail:
pulltype = 'protected_pullboolean'
pushtype = 'protected_pushboolean'
else:
pulltype = 'protected_pull'
pushtype = 'protected_push'
if 'get' in detail:
if detail['get']:
get_method = detail['get']
else:
get_method = None
else:
get_method = 'get_%s_%s' % (section, option)
if get_method in added_methods:
raise SyntaxError('Attempted to add get method %s to new '
'config_handler object, but it was '
'already defined.' % get_method)
if get_method:
added_methods[get_method] = (
" def %s(self):\n"
" return self.%s('%s', '%s')\n"
% (get_method, pulltype, section, option))
if 'set' in detail:
if detail['set']:
set_method = detail['set']
else:
set_method = None
else:
set_method = 'set_%s_%s' % (section, option)
if set_method in added_methods:
raise SyntaxError('Attempted to add set method %s to new '
'config_handler object, but it was '
'already defined.' % set_method)
if set_method:
added_methods[set_method] = (
" def %s(self, value):\n"
" return self.%s('%s', '%s', value)\n"
% (set_method, pushtype, section, option))
if 'def' in detail:
init_defaults.append(
" self.set('%s', '%s', '%s')" %
(section, option, detail['def']))
else:
init_defaults.append(
" self.set('%s', '%s', '%s')" %
(section, option, ""))
if not ignore_description:
if 'def' in detail:
init_initials.append(
" ('%s', '%s', '%s', '%s', '%s')," %
(self.sanify(detail['desc']),
self.sanify(detail['def']),
pushtype, section, option))
else:
init_initials.append(
" ('%s', None, '%s', '%s', '%s')," %
(self.sanify(detail['desc']),
pushtype, section, option))
added_methods['generate_defaults'] = ('\n'.join(init_defaults) + '\n' +
' self.store()\n')
if not ignore_description:
init_initials.extend([
" ]",
"",
" for desc, def_, fxn, sec, opt in to_initialize:",
" value_set = False",
" while not value_set:",
" try:",
" print('Now setting [%s].[%s]:' % (sec, opt))",
" print('Description: %s' % desc)",
" if def_:",
" print('Leave blank to use default '",
" 'value \"%s\".' % def_)",
" val = input('Set [%s].[%s]: ' % (sec, opt))",
" if val:",
" getattr(self, fxn)(sec, opt, val)",
" value_set = True",
" elif def_:",
" getattr(self, fxn)(sec, opt, def_)",
" value_set = True",
" else:",
" print('(!!!) Invalid value provided, '",
" 'or no value provided with no '",
" 'default available.\\n')",
" if value_set:",
" rec = self.get(sec, opt)",
" print('Value set as \"%s\".' % rec,",
" end=' ')",
" chk = input('Is that correct? (y/n) ')",
" if chk.lower().strip().startswith('y'):",
" print('Input accepted and stored.'",
" '\\f\\n\\r')",
" else:",
" print('Interpreted response as '",
" '\"no\". Will recapture '",
" 'input.\\n')",
" value_set = False",
" except KeyboardInterrupt:",
" raise",
" except:",
" print('(!!!) Error encountered when '",
" 'attempting to set value.\\n')",
" self.store()"
])
added_methods['interactive_initialization'] = (
'\n'.join(init_initials) + '\n')
_func_code_ = (self._FUNC_CODE_ +
'\n'.join(filter(lambda x: isinstance(x, str),
added_methods.values())))
exec(compile(_func_code_, '<string>', 'exec'))
config = eval('config_handler')
config.func_code = _func_code_
return config
def sanify(self, text):
return text.encode('unicode-escape').decode().replace("'", "\\'")
_BS4_PARSER = 'html.parser'
_GET_CONFIG = config_generator()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class bot_prompt(cmd.Cmd):
# errno.ENOTTY
def __init__(self):
super(self.__class__, self).__init__()
self.prompt = __CMD_STR__
self.size = shutil.get_terminal_size()
self.height, self.width = self.size.lines, self.size.columns
class bot(praw.Reddit, threading.Thread):
CONFIG_DEFAULTS = collections.OrderedDict([
('crawl', collections.OrderedDict([
('file', {'def': 'data.record',
'desc': ('This is the name of the flatfile that will be '
'used to store all collected data on a user-by-'
'user basis.')}),
('hold', {'def': '10',
'desc': ('This is the number of seconds the bot will '
'spend in each state as a minimum.\nAs an '
'example, the bot has three states by default:\n'
' 1. Crawl /new of the target subreddit.\n'
' 2. Respond to user PMs.\n'
' 3. Crawl the trade thread of the target '
'subreddit.')}),
('sleep', {'def': '100',
'desc': ('This is the number of seconds the bot will '
'spend doing nothing after completing each set '
'of states.')})
])),
('reddit', collections.OrderedDict([
('user_agent', {'def': ('%s-%s:%s:MechKB0t-v%s (by %s)' %
(platform.system(), platform.processor(),
uuid.uuid5(uuid.NAMESPACE_OID, __INFO__),
__VERSION__,
', '.join(coerce_reddit_handles()))),
'desc': ('This is the plaintext string that will '
'be used by the admins at reddit to '
'identify this bot. It is recommended '
'that bots follow the format:\n'
' <platform>:<app ID>:<version string> '
'(by /u/<reddit username>)\n'
'Full rules and restrictions can be '
'found here: http://github.com/reddit/'
'reddit/wiki/API.')}),
('client_id', {'desc': ('This is the OAuth2 client_id created '
'for your reddit app instance. More '
'information can be found here: http://'
'github.com/reddit/reddit/wiki/OAuth2.')}),
('client_secret', {'desc': ('This is the OAuth2 client_secret '
'created for your reddit app instance. '
'More information can be found here: '
'http://github.com/reddit/reddit/wiki'
'/OAuth2.')}),
('redirect_url', {'desc': ('This is the OAuth2 redirect_url created '
'for your reddit app instance. More '
'information can be found here: http://'
'github.com/reddit/reddit/wiki/OAuth2.')}),
('subreddit', {'desc': 'The subreddit targeted by this bot.'}),
('multiprocess', {'def': 'false',
'get': 'is_multiprocessed',
'set': None,
'desc': 'Currently not implemented. Ignore.',
'boolean': True}),
('verbose', {'def': 'true',
'get': 'is_verbose',
'set': 'set_verbose',
'desc': ('Sets whether the bot will display its '
'actions during runtime, or simply log them.'),
'boolean': True})
])),
('monitor', collections.OrderedDict([
('log', {'def': 'event.log',
'desc': ('This is the flatfile that will be used to log '
'all actions taken by the bot.')}),
('posts', {'def': 'true',
'desc': ('Whether or not the bot will log basic '
'information concerning all posts observed '
'during its runtime.'),
'boolean': True}),
('users', {'def': 'true',
'desc': ('Whether or not the bot will record basic '
                                'information concerning all users observed '
'during its runtime.'),
'boolean': True}),
('format', {'def': '%(created)f -- %(levelname)s -> %(message)s',
'desc': ('This is the format string that will be used '
'in creating each entry in the log file. '
'Formatting options include:\n'
' %(asctime)s: Human-readable time when a '
'logged event was created.\n'
' %(created)f: Seconds since epoch when a '
'logged event was created.\n'
' %(filename)s: Source file that created a '
'logged event.\n'
' %(funcName)s: Function used that created a '
'logged event.\n'
' %(levelname)s: Severity of logged event as '
'an English string.\n'
' %(levelno)s: Severity of logged event as a '
'numeric value.\n'
' %(lineno)d: Line number of the source file '
'where a logged event was created.\n'
' %(module)s: Module that created a logged '
'event.\n'
' %(msecs)d: Millisecond portion of system '
'time when an event was logged.\n'
' %(message)s: Message provided when an event '
'was logged.\n'
' %(name)s: Name of the logger used to create '
'the logged event.\n'
' %(pathname)s: Full pathname of the source '
'file that created the logged event.\n'
' %(process)d: Process ID that created the '
'logged event.\n'
' %(processName)s: Process name that created '
'the logged event.\n'
' %(relativeCreated)d: Milliseconds after the '
'logging module was initially loaded that an '
'event was logged.\n'
' %(thread)d: Thread ID that created the '
'logged event.\n'
' %(threadName)s: Thread name that created '
'the logged event.\n'
'Further information can be found at: '
'http://docs.python.org/3.4/library/logging.'
                                 'html#logging.LogRecord')}),
('respond', {'def': 'true',
'desc': ('Whether or not the bot should make a post '
'on each new trade thread.'),
'boolean': True}),
('response', {'desc': ('The text template used when commenting on '
'a new trade thread. Formatting options '
                                   'include:\n')}),
])),
('sidebar', collections.OrderedDict([
('add_button', {'def': 'false',
'get': 'should_add_button',
'desc': ('Whether the bot should add a button for '
'the current trade thread on the target '
'subreddit\'s sidebar.'),
'boolean': True}),
('button_text', {'desc': 'The text used for the created button.'}),
('button_start', {'desc': ('A specialized tag, included in the '
'sidebar\'s text, which determines '
'where the button starts.')}),
('button_end', {'desc': ('A specialized tag, included in the '
'sidebar\'s text, which determines where '
'the button ends.')})
])),
('class', collections.OrderedDict([
('use', {'def': 'true',
'desc': 'If the bot should monitor and update user flair.',
'boolean': True}),
('start', {'desc': 'Flair given to users never seen before.'}),
('limit', {'desc': ('Maximum integer indicating how many times '
'a user\'s flair can be incremented.')}),
('ignore', {'desc': ('A whitespace-separated list of flairs which '
'should be ignored if encountered by the bot.')}),
('pattern', {'desc': ('The pattern used to generate new user '
'flair following an increment. %i is used '
'to indicate where the integer value of the '
                                  'flair should go. As an example, a flair '
'pattern of "u-%i" would take on the values '
'"u-1" for a user with a flair value of 1, '
'"u-2" for a user with a flair value of 2, '
'"u-3" for a user with a flair value of 3, '
'etc.')}),
('increment', {'def': '1',
'desc': ('The integer value that a user\'s flair '
'value will be incremented by with each '
'flair increment. Given a default value '
'of "1", a user with a flair value of 3 '
'would advance to a flair value of 4 after '
'completing a trade.')})
])),
('trade', collections.OrderedDict([
('method', {'def': 'post',
'desc': ('The method used by the bot to confirm user '
'trades. Three options are available, "pm", '
'"post", or "both". If "pm" is specified, '
'trades will be confirmed via private '
'message; with the sender in a trade sending '
'a private message to the bot containing the '
'reddit handle of the recipient. The bot then '
'contacts the other party, who confirms the '
'trade. If "post" is specified, a public '
'thread is used. Within the thread, the '
'sender creates a top-level comment, which '
'the recipient replies to with a comment '
'containing the phrase "confirmed". In the '
'case that "both" is specified, either option '
'can be used to confirm a trade.')}),
('post_id', {'desc': ('The id used by the trading thread within '
'the target subreddit. If left blank, the '
'bot will create its own trading thread. In '
'the case that "pm" is used as a method, '
'this value is ignored.')}),
('post_text', {'desc': ('The text template used when creating a '
'new trade thread. Supports formatting '
'arguments as found in Python\'s strftime '
'command. For more information, see: '
'https://docs.python.org/2/library/time.html'
'#time.strftime.')}),
('post_rate', {'def': 'monthly',
'desc': ('The rate at which the bot will create '
'new trading posts on the target subreddit.'
' Provided options include "daily", '
'"weekly", "monthly", "yearly", and "never"'
'. If "never" is selected, the post_id will'
' have to be updated manually by the user.')}),
('post_title', {'desc': ('The title template used when creating a '
'new trade thread\'s title. Supports '
'formatting arguments as found in Python\'s'
'strftime command. For more information, '
'see: https://docs.python.org/2/library/'
'time.html#time.strftime.')}),
('post_sticky', {'def': 'false',
'desc': ('If the bot makes the trade thread sticky'
' or not.')}),
('post_response', {'desc': ('The text template used when replying '
'to a confirmed trade comment on a '
'trade post. Supports formatting '
'arguments as found in Python\'s '
'strftime command. For more information'
', see: https://docs.python.org/2/'
'library/time.html#time.strftime.')}),
('message_text', {'desc': ('The text template used when sending a '
'private message to both users following'
' a confirmed trade. Supports formatting'
' arguments as found in Python\'s '
'strftime command. For more information,'
' see: https://docs.python.org/2/library'
'/time.html#time.strftime.')}),
('message_title', {'desc': ('The title template used when sending a '
'private message to both users '
'following a confirmed trade. Supports '
'formatting arguments as found in '
'Python\'s strftime command. For more '
'information, see: https://docs.python.'
'org/2/library/time.html#time.strftime.')}),
('respond', {'def': 'true',
'desc': ('If the bot should respond following a '
'confirmed trade or not.'),
'boolean': True}),
('age_msg', {'desc': ('Message used to reply when a user attempts '
'to confirm a trade when their account is '
'younger than the provided age limit.')}),
('age_type', {'def': 'days',
'desc': ('Units used in determining if a user\'s '
'account is too young to confirm a trade. '
'Options are "seconds", "minutes", "hours", '
'"days", "months".')}),
('age_limit', {'def': '30',
'desc': ('Numerical measurement used in determining '
'if a user\'s account is too young to '
'confirm a trade.')}),
('same_msg', {'desc': ('Message used to reply when a user attempts '
'to confirm a trade with themselves.')}),
('karma_msg', {'desc': ('Message used to reply when a user attempts'
' to confirm a trade when their account\'s '
'karma is below the provided karma limit.')}),
('karma_type', {'def': 'both',
'desc': ('Units used in determining if a user\'s '
'account has sufficient karma to confirm '
'a trade. Options are "comment", "link", '
'or "both".')}),
('karma_limit', {'def': '100',
'desc': ('Numerical measurement used in '
'determining if a user\'s account has '
'sufficient karma to confirm a trade.')})
])),
('heatware', collections.OrderedDict([
('method', {'def': 'pm',
'desc': ('The method by which the bot will collect a '
'user\'s heatware URL. Three options are '
'available, "pm", "post", and "both". If "pm" '
'is specified, users can submit heatware URLs '
'by means of private message to the bot. If '
'"post" is specified, users can submit their '
'heatware URLs by means of commenting in a '
'specified post. If "both" is specified, '
'either method can be used.')}),
('post_id', {'desc': ('The id used by the heatware thread in the '
'target subreddit.')}),
('post_text', {'desc': ('The text template used when creating a '
'new heatware thread. Supports formatting '
'arguments as found in Python\'s strftime '
'command. For more information, see: '
'https://docs.python.org/2/library/time.html'
'#time.strftime.')}),
('post_rate', {'def': 'yearly',
'desc': ('The rate at which the bot will create '
'new heatware posts on the target subreddit.'
' Provided options include "daily", '
'"weekly", "monthly", "yearly", and "never"'
'. If "never" is selected, the post_id will'
' have to be updated manually by the user.')}),
('post_title', {'desc': ('The title template used when creating a '
'new heatware thread\'s title. Supports '
                                     'formatting arguments as found in Python\'s '
'strftime command. For more information, '
'see: https://docs.python.org/2/library/'
'time.html#time.strftime.')}),
('post_sticky', {'desc': ('If the bot makes the heatware thread '
'sticky or not.')}),
('post_response', {'desc': ('The text template used when replying '
'to an accepted heatware comment on a '
'heatware post. Supports formatting '
'arguments as found in Python\'s '
'strftime command. For more information'
', see: https://docs.python.org/2/'
'library/time.html#time.strftime.')}),
('message_text', {'desc': ('The text template used when sending a '
'private message to a user following'
' an accepted heatware profile. Supports '
'formatting arguments as found in Python\'s'
' strftime command. For more information,'
' see: https://docs.python.org/2/library'
'/time.html#time.strftime.')}),
('message_title', {'desc': ('The title template used when sending a '
'private message to a user following '
'an accepted heatware profile. Supports '
'formatting arguments as found in '
'Python\'s strftime command. For more '
'information, see: https://docs.python.'
'org/2/library/time.html#time.strftime.')}),
('regex', {'def': '(?:.*)(http(?:s?)://www\.heatware\.com/eval\.php\?id=[0-9]+)(?:.*)',
'set': None,
                       'desc': ('The regular expression used to extract '
'heatware URLs from plaintext comments.')}),
('group', {'def': '1',
'set': None,
'desc': ('The group within the regular expression that '
'actually contained the captured heatware URL. '
'If left blank, the parser will accept the '
'entire match resulting from the regular '
'expression.')}),
('respond', {'def': 'true',
'desc': ('If a bot should respond to an accepted '
'heatware profile URL or not.'),
'boolean': True})
]))
])
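    # Illustrative sketch of a matching config.cfg excerpt, assuming an
    # INI-style layout with one section per top-level key above (the exact
    # format is determined by _GET_CONFIG, and the values shown here are
    # hypothetical):
    #
    #   [heatware]
    #   method = pm
    #   post_rate = yearly
    #   regex = (?:.*)(http(?:s?)://www\.heatware\.com/eval\.php\?id=[0-9]+)(?:.*)
    #   respond = true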
def __init__(self, conf_file='config.cfg'):
config_constructor = _GET_CONFIG(self.CONFIG_DEFAULTS)
self.config_handler = config_constructor(conf_file)
if self.config_handler.status:
raise EnvironmentError(self.config_handler.status,
('Current status #%d <%s> "%s".' %
(self.config_handler.status,
errno.errorcode[
self.config_handler.status],
os.strerror(self.config_handler.status))),
conf_file)
log = logging.StreamHandler(self.config_handler.get_monitor_log())
fmt = logging.Formatter(self.config_handler.get_monitor_format())
log.setLevel(logging.DEBUG)
log.setFormatter(fmt)
logger.addHandler(log)
self.data_store = database_handler(
self.config_handler.get_crawl_file())
self.heat_parse = heatware_crawler()
self.run_states = {
state[6:].lstrip('_'): getattr(self, state)
for state in set(dir(self)).difference(dir(super()))
if (state.startswith('_state')
and hasattr(getattr(self, state), '__call__'))}
super().__init__(self.config_handler.get_reddit_user_agent())
self.set_oauth_app_info(self.config_handler.get_reddit_client_id(),
self.config_handler.get_reddit_client_secret(),
self.config_handler.get_reddit_redirect_url())
threading.Thread.__init__(self, daemon=True)
def run(self):
while True:
state_time = {state: max(1, self.config_handler.get_crawl_hold())
for state in self.run_states}
while any(t > 0 for t in state_time.values()):
for state, function in self.run_states.items():
if state_time[state] > 0:
self.state = state
state_start = time.time()
try:
function()
except:
pass
state_elaps = time.time() - state_start
if state_elaps > 0:
state_time[state] -= state_elaps
else:
state_time[state] = 0
time.sleep(self.config_handler.get_crawl_sleep())
self.shutdown()
def _state_trade(self):
"""
Performs processing necessary for the verification and updating
of user's css class following a successful trade.
Will need to call the following methods from self.config_handler:
get_trade_method()
if get_trade_method() in ['post', 'both']:
get_trade_post_id()
get_trade_post_text()
get_trade_post_rate()
get_trade_post_title()
get_trade_post_sticky()
get_trade_post_response()
should_add_button()
get_sidebar_button_text()
get_sidebar_button_start()
get_sidebar_button_end()
if get_trade_method() in ['pm', 'both']:
get_trade_message_text()
get_trade_message_title()
get_trade_respond()
get_trade_age_msg()
get_trade_age_type() -> ['seconds', 'minutes', 'hours', 'days', 'months']
get_trade_same_msg()
get_trade_karma_msg()
get_trade_karma_type() -> ['comment', 'link', 'both']
get_trade_karma_limit()
get_class_use()
get_class_start()
get_class_limit()
get_class_ignore()
get_class_pattern()
get_class_increment()
In addition, will need to log results to logger, and store updated
user information in self.data_store if get_monitor_users() is True.
"""
if self.config_handler.get_trade_method() in ['pm', 'both']:
pass
if self.config_handler.get_trade_method() in ['post', 'both']:
pass
def _state_posts(self):
"""
Monitors and replies to previously unseen posts on the target
subreddit's /new page.
Will need to call the following methods from self.config_handler:
get_monitor_posts()
get_monitor_users()
get_monitor_format()
get_monitor_respond()
get_monitor_response()
"""
pass
def _state_flair(self):
"""
Responsible for verifying and setting user flair with regards to their
accounts on http://www.HeatWare.com.
Will need to call the following methods from self.config_handler:
get_heatware_method()
if get_heatware_method() in ['post', 'both']:
get_heatware_post_id()
get_heatware_post_text()
get_heatware_post_rate()
get_heatware_post_title()
get_heatware_post_sticky()
get_heatware_post_response()
if get_heatware_method() in ['pm', 'both']:
get_heatware_message_text()
get_heatware_message_title()
get_heatware_regex()
get_heatware_group()
get_heatware_respond()
Recall:
>>> import time, pprint
>>> self.heat_parse.parse('2')
>>> while len(self.heat_parse) < 1: time.sleep(1)
>>> results = {id_: info for id_, info in self.heat_parse}
>>> pprint.pprint(results['2'])
{'aliases': {'amdmb': {'heat23': None},
'anandtech bbs': {'heat23': 'http://forum.anandtech.com'},
'arstechnica': {'heat23': None},
'geekhack': {'heatware': None},
'techpowerup!': {'heatware': None},
'webhostingtalk': {'heat23': None}},
'evaluations': {334221: {'comments': 'Great transaction, he sent money '
'via paypal and I shipped upon '
'payment.',
'date': '06-30-2005',
'forum': 'anandtech bbs',
'user': 'floben'},
344973: {'comments': 'What can I say about the owner of '
'heatware besides the fact that it '
'was an awesome transaction. I had '
'no worries about shipping first, '
'and his great communication '
'throughout the transaction put me '
'at ease.',
'date': '08-17-2005',
'forum': 'anandtech bbs',
'user': 'jackson18249'},
345198: {'comments': 'Quick payment & good communication. '
'You cannot ask for a smoother '
'transaction!',
'date': '08-23-2005',
'forum': 'anandtech bbs',
'user': 'hkklife'},
356225: {'comments': 'Super-fast payment, prompt response '
'to PMs. There was a delivery delay '
'(because of Katrina) but buyer was '
'very patient and kept in touch. '
'Thanks!',
'date': '09-27-2005',
'forum': 'anandtech bbs',
'user': 'fornax'},
423266: {'comments': 'This was simply one of the best '
'transactions I have experienced on '
'Anandtech. I sent Heat23 a paypal '
'e-check (expecting for funds to '
'clear first) but he crosshipped '
'minutes later on a Saturday. Got '
'the package Monday morning in the '
'office. Awesome.',
'date': '08-14-2006',
'forum': 'anandtech bbs',
'user': 'jloor'},
425040: {'comments': 'Fast payment, smooth transaction... '
'Good guy to deal with! Thanks!',
'date': '08-23-2006',
'forum': 'anandtech bbs',
'user': 'Doctor Feelgood'},
425650: {'comments': 'Heat23 threw in a couple of '
'freebies and shipped everything out '
'lightspeed. Thanks Man!',
'date': '08-26-2006',
'forum': 'anandtech bbs',
'user': 'ScottyWH'},
425699: {'comments': 'This was a very smooth transaction. '
'Heat sent me payment and I sent him '
'the camera. I would gladly sell to '
'him again. Thanks!',
'date': '08-20-2006',
'forum': 'anandtech bbs',
'user': 'dak125'},
426236: {'comments': 'The transaction went great, seller '
'was the easy to deal with and the '
'shipping was fast. (Freebie '
'included)...Love to deal again in '
'the future...',
'date': '08-29-2006',
'forum': 'anandtech bbs',
'user': 'mackle'},
487916: {'comments': 'Good communication, paid via '
"Paypal, smooth deal. If you can\\'t "
'trust heat23, who can you trust?;)',
'date': '08-23-2007',
'forum': 'anandtech bbs',
'user': 'Tates'},
496656: {'comments': 'Nice guy to work with. His '
'contribution to the trading '
'community is definitely '
'appreicated!!! Thanks again heat. :)',
'date': '11-08-2007',
'forum': 'anandtech bbs',
'user': 'ELopes580'},
527657: {'comments': 'Though took a bit to get the deal '
'done, he was courteous, kept in '
'touch, and made the whole '
'experience awesome! Thanks for the '
"phone, it\\'s awesome!",
'date': '08-04-2008',
'forum': 'anandtech bbs',
'user': 'proxops-pete'},
621980: {'comments': 'Donation acknowledgement and thanks '
'received. Thanks for spending your '
'time building something to do good.',
'date': '07-11-2011',
'forum': 'heatware',
'user': 'AmboBartok'},
690634: {'comments': 'Got payment quickly, great '
'comunication. Would deal with again '
'anytime. A++++',
'date': '07-23-2014',
'forum': 'anandtech bbs',
'user': 'Sniper82'},
699942: {'comments': 'Receiver was packed very well, in '
'what appeared to be the original '
'box. This receiver was shipped from '
'CA to NY and was in beautiful '
'condition when it arrived. Heat23 '
'even included a couple HDMI cables. '
'The item was as described, shipped '
'promptly, packed very well, and is '
'working well as I type this. This '
'transaction could not have gone '
"better, and I\\'d definitely deal "
'with Heat23 again.',
'date': '03-03-2015',
'forum': 'anandtech bbs',
'user': 'NicePants42'}},
'location': 'Austin, TX',
'rating': {'negative': 0, 'neutral': 0, 'positive': 188}}
"""
if self.config_handler.get_heatware_method() in ['pm', 'both']:
pass
if self.config_handler.get_heatware_method() in ['post', 'both']:
pass
def shutdown(self):
self.heat_parse.kill()
def __repr__(self):
# This section is a carbon copy of the vanilla codebase.
# ( See: threading.Thread.__repr__ )
thread_status = 'initial'
if self._started.is_set():
thread_status = 'started'
self.is_alive()
if self._is_stopped:
thread_status = 'stopped'
if self._daemonic:
thread_status += ' daemon'
if self._ident is not None:
thread_status += ' %s' % self._ident
reddit_status = 'logged'
if self.is_logged_in():
reddit_status += '-in'
else:
reddit_status += '-out'
if self.is_oauth_session():
reddit_status += ' oauth2'
return "<%s.%s {'thread': (%s, %s), 'reddit': (%s, %s)} at %s>" % (
self.__class__.__module__, self.__class__.__name__,
self.name, thread_status, self.user, reddit_status, hex(id(self)))
class database_handler(shelve.DbfilenameShelf):
def __init__(self, data_file):
super(self.__class__, self).__init__(filename=data_file)
def get(self, key):
try:
return self[key.lower()]
except:
return {}
def set(self, key, val):
try:
assert(isinstance(val, dict))
cur = self.get(key.lower())
val = self.update(val, cur)
self[key.lower()] = val
return True
except:
return False
def remove(self, key):
try:
del self[key.lower()]
return True
except:
return False
def update(self, new_, orig):
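        # Recursively copy `orig` into `new_`: nested dicts are merged key by
        # key, and leaf values already present in `orig` take precedence over
        # those in `new_`.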
for key, val in orig.items():
if isinstance(val, dict):
new_[key] = self.update(new_.get(key, {}), val)
else:
new_[key] = orig[key]
return new_
def terminate(self):
self.sync()
self.close()
class heatware_crawler(multiprocessing.Process):
def __init__(self, page_wait=0, deep_parse=False, rand_wait=False):
# TODO: See if heat is okay with maximum one request per sixty seconds.
# STATUS: Reached out to heat as of Aug 29; no response as of yet.
# The site's robots.txt (http://heatware.com/robots.txt) seems
# to allow any sort of automated crawling, but I won't
# implement the ability to perform a 'deep_parse' until I
# get confirmation from the man himself.
self._state = multiprocessing.Value('c', 0)
self.page_wait = max(60, page_wait)
self.sqrt_wait = math.sqrt(self.page_wait)
# TODO: See if heat is okay with deep crawling of his site.
self.deep_parse = False # deep_parse
if rand_wait:
self.rand_wait = lambda: random.uniform(self.sqrt_wait / 2.0,
self.sqrt_wait * 2.0)
else:
self.rand_wait = lambda: 0
self.next_time = time.time()
self.get_next_time = lambda: (
time.time() + self.page_wait + self.rand_wait())
self.get_page = urllib.request.urlopen
self.root_page = 'http://www.heatware.com/eval.php?id='
self.page_ext = '&pagenum=%i'
self.eval_ext = '&num_days=%i'
self.info_dict = {
# 'deep_parse': self.deep_parse,
'rating': {'positive': 0,
'neutral': 0,
'negative': 0},
'aliases': {},
'location': None,
'evaluations': []
}
self.subhead_map = {
'Evaluation Summary': {'function': self._summary,
'key': 'rating'},
'User Information': {'function': self._information,
'key': 'location'},
'Aliases': {'function': self._aliases,
'key': 'aliases'},
'Evaluations': {'function': self._evaluations,
'key': 'evaluations'}
}
self.text_clean = regex.compile(r'\s+', regex.UNICODE)
self.date_clean = regex.compile(r'\d{2}-\d{2}-\d{4}', regex.UNICODE)
self.info_queue = multiprocessing.Queue()
self.user_queue = multiprocessing.JoinableQueue()
super().__init__()
self.daemon = True
self.start()
def run(self):
while True:
self._state.value = b'i'
heatware_id = self.user_queue.get()
if heatware_id is Ellipsis:
break
else:
self._state.value = b'b'
information = self._parse(heatware_id)
self.info_queue.put((heatware_id, information))
self.user_queue.task_done()
self._state.value = b'd'
def parse(self, id_):
self.user_queue.put(id_)
def kill(self):
self.user_queue.put(Ellipsis)
def state(self):
if self._state.value == b'i':
return 'idle'
if self._state.value == b'b':
return 'busy'
if self._state.value == b'd':
return 'dead'
return 'none'
def is_idle(self):
return self._state.value == b'i'
def is_busy(self):
return self._state.value == b'b'
def is_dead(self):
return self._state.value == b'd'
def remaining_jobs(self):
# Not exact.
return self.user_queue.qsize()
def __nonzero__(self):
# Not reliable.
return self.info_queue.empty()
def __len__(self):
# Not exact.
return self.info_queue.qsize()
def __iter__(self):
try:
while True:
yield self.info_queue.get_nowait()
except:
            # PEP 479: returning ends the generator cleanly; raising
            # StopIteration inside a generator is a RuntimeError on Python 3.7+.
            return
def _parse(self, id_):
return self._extract(self.root_page + str(id_))
def _extract(self, url):
time.sleep(max(0, self.next_time - time.time()))
info = copy.deepcopy(self.info_dict)
page = self.get_page(url)
html = str(page.read())
self.next_time = self.get_next_time()
soup = bs4.BeautifulSoup(html, _BS4_PARSER)
for subhead in soup.find_all(class_='subhead'):
if subhead.text in self.subhead_map:
try:
info[self.subhead_map[subhead.text]['key']] = (
self.subhead_map[subhead.text]['function'](subhead,
soup))
except:
info[self.subhead_map[subhead.text]['key']] = (copy.deepcopy(
self.info_dict[self.subhead_map[subhead.text]['key']]))
return info
def _summary(self, spoonful, soup):
root = spoonful.parent
scores = root.find_all(class_='num')
summary = {}
for idx, item in enumerate(['positive', 'neutral', 'negative']):
try:
summary[item] = int(scores[idx].text)
except:
summary[item] = None
return summary
def _information(self, spoonful, soup):
root = spoonful.parent
info = root.find_all('div')
for idx in range(len(info) - 1):
prior, label = info[idx], info[idx + 1]
if label.text == 'Location':
return prior.text
return None
def _aliases(self, spoonful, soup):
root = spoonful.parent
links = {}
for alias in root.find_all('div'):
link = alias.find('a', href=True)
try:
alias, site = alias.text.split(' on ', 1)
alias = alias.lower()
if link:
links.setdefault(link.text.lower(), {}
).setdefault(alias, link.get('href'))
else:
links.setdefault(site.lower(), {}).setdefault(alias, None)
except:
pass
return links
def _evaluations(self, spoonful, soup):
root = spoonful.parent
evals = {}
for evalu in root.find_all(id=regex.compile(r'rp_[0-9]+')):
id_ = int(evalu.get('id').strip('rp_'))
info = {}
try:
info['user'] = self._clean(evalu.find('td').text)
except:
info['user'] = None
try:
info_string = soup.find(id=('row_%i' % id_)).text
date_match = self.date_clean.search(info_string)
info['date'] = self._clean(date_match.group(0))
date_span = date_match.span(0)
except:
info['date'] = None
date_span = None
if date_span:
try:
info['forum'] = self._clean(info_string[date_span[1]:]
).lower()
except:
info['forum'] = None
else:
info['forum'] = None
try:
for inner in evalu.find_all('strong'):
if 'Comments' in inner.text:
info['comments'] = self._clean(
inner.parent.text.split(None, 1)[1])
except:
info['comments'] = None
evals[id_] = info
return evals
def _clean(self, text):
_text = text.replace('\\t', '\t'
).replace('\\r', '\r'
).replace('\\n', '\n')
return self.text_clean.sub(' ', _text)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=('Automates flair monitoring '
"for reddit's trading "
'subreddits.'),
epilog=('Currently maintained by ' +
', '.join(coerce_reddit_handles()) +
'.'))
parser.add_argument('-is', '--interactive-shell', action='store_true',
help='run the bot with an interactive shell')
args = parser.parse_args()
if args.interactive_shell:
bot_prompt().cmdloop()
else:
bot()
|
gpl-2.0
| 7,135,524,112,241,807,000
| 48.879902
| 99
| 0.415147
| false
| 5.08436
| true
| false
| false
|
quaquel/EMAworkbench
|
ema_workbench/examples/cart_flu_example.py
|
1
|
1172
|
'''
Created on May 26, 2015
@author: jhkwakkel
'''
import matplotlib.pyplot as plt
import ema_workbench.analysis.cart as cart
from ema_workbench import ema_logging, load_results
ema_logging.log_to_stderr(level=ema_logging.INFO)
def classify(data):
# get the output for deceased population
result = data['deceased population region 1']
    # if deceased population is higher than 1,000,000 people,
# classify as 1
classes = result[:, -1] > 1000000
return classes
# load data
fn = './data/1000 flu cases with policies.tar.gz'
results = load_results(fn)
experiments, outcomes = results
# extract results for 1 policy
logical = experiments['policy'] == 'no policy'
new_experiments = experiments[logical]
new_outcomes = {}
for key, value in outcomes.items():
new_outcomes[key] = value[logical]
results = (new_experiments, new_outcomes)
# perform cart on modified results tuple
cart_alg = cart.setup_cart(results, classify, mass_min=0.05)
cart_alg.build_tree()
# print cart to std_out
print(cart_alg.stats_to_dataframe())
print(cart_alg.boxes_to_dataframe())
# visualize
cart_alg.show_boxes(together=False)
cart_alg.show_tree()
plt.show()
|
bsd-3-clause
| -5,965,908,289,611,535,000
| 21.980392
| 61
| 0.726109
| false
| 3.167568
| false
| false
| false
|
kdart/pycopia3
|
net/pycopia/http/jsonrpc1.py
|
1
|
3487
|
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides a simple, general purpose JSON RPC v1.0 over HTTP.
TODO: Asynchronous client handling multiple connections at once.
"""
import json
from pycopia import urls
from pycopia.inet import httputils
from pycopia.http.client import (HTTPRequest, RequestResponseError)
class JSONError(Exception):
pass
class JSONRequestError(JSONError):
pass
class JSONResponseError(JSONError):
pass
def Counter():
i = 0
while True:
yield i
i += 1
class JSON1Method:
COUNTER = Counter() # class singleton
def __init__(self, name, params):
self.method = name
self.params = params
self.id = next(self.COUNTER)
def to_json(self):
return json.dumps({"method": self.method, "params": self.params, "id": self.id})
class SimpleJSONRPCClient:
def __init__(self, url, logfile=None):
self._baseurl = urls.UniversalResourceLocator(url)
self._cookiejar = httputils.CookieJar()
self._logfile = logfile
def call(self, path, query, method, args):
"""Call the remote method, return result.
"""
data = JSON1Method(method, args)
resp = self.post(path, data, query)
res = json.loads(resp.body.decode("utf-8"))
if res["id"] != data.id:
raise JSONRequestError("mismatched id")
err = res.get("error")
if err:
raise JSONResponseError((err["code"], err["message"]))
return res["result"]
def get(self, path, query=None):
url = self._baseurl.copy()
url.path = self._baseurl.path + path
headers = [httputils.Referer(self._baseurl), httputils.Connection("keep-alive")]
request = HTTPRequest(url, method="GET", query=query, cookiejar=self._cookiejar, extraheaders=headers)
resp = request.perform(self._logfile)
if resp.status.code != 200:
raise RequestResponseError(str(resp.status))
self._cookiejar.parse_mozilla_lines(resp.cookielist)
return resp
def post(self, path, data, query=None):
url = self._baseurl.copy()
url.path = self._baseurl.path + path
if query:
url.query = query
request = HTTPRequest(url, data, method="POST", cookiejar=self._cookiejar,
accept="application/json")
resp = request.perform(self._logfile)
if resp.status.code != 200:
raise RequestResponseError(str(resp.status))
self._cookiejar.parse_mozilla_lines(resp.cookielist)
return resp
@property
def cookies(self):
return self._cookiejar.get_setcookies()
def clear_cookies(self):
return self._cookiejar.clear()
if __name__ == "__main__":
m = JSON1Method("callme", ("maybe", 1))
print(m.to_json())
m = JSON1Method("callme", ("again", 2))
print(m.to_json())
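    # Hypothetical usage sketch of SimpleJSONRPCClient (the endpoint URL,
    # path and remote method name below are made up for illustration):
    #
    #   client = SimpleJSONRPCClient("http://localhost:8080/api")
    #   result = client.call("/rpc", None, "echo", ["hello, world"])
    #   print(result)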
|
apache-2.0
| -5,706,128,941,765,622,000
| 29.060345
| 110
| 0.641526
| false
| 3.878754
| false
| false
| false
|
ty-tal/python
|
tools/user_audit.py
|
1
|
2470
|
#!/usr/bin/python2.7 -Wd
'''
file:user_audit.py
use: audits the users table and compares the current date to each last password update;
if the last update exceeds the threshold, sends a notice to those users
author: ty.talmadge@gmail.com
date: 20131002
connector: mysqldb because the SQL only changes per table layout
'''
# define imported libraries
import datetime,MySQLdb,smtplib
# import mimetext if you want to include a copy of the password update policy
from email.mime.text import MIMEText
# turn mail function off and on
# 0 is off 1 is on
mail_on=0
# define today and lookback day
lookback_days=90
today_day=datetime.datetime.today()
today_holdout=datetime.date.today()
day_format="%Y-%m-%d"
hour_format="%H:%M:%S"
this_day=today_day.strftime(day_format)
this_hour=today_day.strftime(hour_format)
today="%s %s" % (this_day,this_hour)
lookback=datetime.timedelta(days=lookback_days)
holdout=today_holdout-lookback
threshhold_d="%s %s" % (holdout,this_hour)
threshhold=str(threshhold_d)
# define msg as multipart, application and message to be sent to listed users
audit_file="/path/to/audit_message.txt"
ap=open(audit_file, 'rb')
msg=MIMEText(ap.read())
ap.close()
me='application_name@mycompany.com'
application_name='Your_Application'
#connect to mysql database
audit_db = MySQLdb.connect(host="localhost",user="some_user",passwd="some_password",db="some_db_schema")
# create cursor object
cursor = audit_db.cursor()
# query user table
cursor.execute("select name,full_name,password_created,email from users where password_created < '%s' order by name asc" % threshhold)
print "Auditing users starting %s and looking back %s days to %s.\n" % (today,lookback_days,threshhold)
print "The following users have not updated their passwords in the last %s days.\n " % lookback_days
# echo results if running in a scheduler, i.e. Control-M, Open-Scheduler, etc. so they will appear in sysout
# format the data so it is in readable columns
for row in cursor.fetchall():
pw_format=str(row[2])
	if mail_on == 1:
# send an email to the users displayed
msg['Subject']='Password update reminder from %s' % application_name
msg['From']=me
msg['To']=row[3]
sendme=smtplib.SMTP('mail_server')
		sendme.sendmail(me, [row[3]], msg.as_string())
sendme.quit()
else:
print row[0].ljust(30), " ", row[1].ljust(30), " ",pw_format.ljust(30), " ", row[3].ljust(30)
# close the database connection
audit_db.close()
# print the done message
print "\nFinished auditing user table.\n"
|
gpl-3.0
| 1,133,495,115,821,487,400
| 35.880597
| 134
| 0.738462
| false
| 3.166667
| false
| false
| false
|
sargentfrancesca/coming-up-roses
|
client_site/models.py
|
1
|
1300
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# copy nano lib/python2.7/site-packages/pinax_theme_bootstrap folder
class Category(models.Model):
category_name = models.CharField(max_length=100)
category_description = models.TextField()
def __str__(self):
return "%s (%s)" % (self.category_name, self.category_description)
class Treatment(models.Model):
treatment_name = models.CharField(max_length=100)
treatment_price = models.DecimalField(max_digits=5, decimal_places=2)
treatment_descripton = models.TextField(null=True)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
def __str__(self):
return "%s: %s" % (self.treatment_name, self.treatment_price)
class MailingList(models.Model):
user_name = models.CharField(max_length=100)
user_email = models.EmailField(max_length=254)
def __str__(self):
return "%s: %s" % (self.user_name, self.user_email)
class Image(models.Model):
image_filename = models.CharField(max_length=100)
image_title = models.CharField(max_length=64)
image_description = models.TextField(null=True)
image_folder = models.CharField(max_length=100, default="photo_shoot")
def __str__(self):
return "%s: %s [%s]" % (self.image_filename, self.image_title, self.image_description)
|
mit
| -7,982,091,275,539,703,000
| 31.525
| 88
| 0.736154
| false
| 3.037383
| false
| false
| false
|
mvcsantos/QGIS
|
python/plugins/processing/algs/qgis/RasterLayerHistogram.py
|
1
|
3219
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import QVariant
from qgis.core import QgsField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputHTML
from processing.tools import dataobjects
from processing.tools import raster
class RasterLayerHistogram(GeoAlgorithm):
INPUT = 'INPUT'
PLOT = 'PLOT'
TABLE = 'TABLE'
BINS = 'BINS'
def defineCharacteristics(self):
self.name = 'Raster layer histogram'
self.group = 'Graphics'
self.addParameter(ParameterRaster(self.INPUT,
self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BINS,
self.tr('Number of bins'), 2, None, 10))
self.addOutput(OutputHTML(self.PLOT, self.tr('Histogram')))
self.addOutput(OutputTable(self.TABLE, self.tr('Table')))
def processAlgorithm(self):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
nbins = self.getParameterValue(self.BINS)
outputplot = self.getOutputValue(self.PLOT)
outputtable = self.getOutputFromName(self.TABLE)
values = raster.scanraster(layer)
# ALERT: this is potentially blocking if the layer is too big
plt.close()
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
(n, bins, values) = plt.hist(valueslist, nbins)
fields = [QgsField('CENTER_VALUE', QVariant.Double),
QgsField('NUM_ELEM', QVariant.Double)]
writer = outputtable.getTableWriter(fields)
for i in xrange(len(values)):
writer.addRecord([str(bins[i]) + '-' + str(bins[i + 1]), n[i]])
plotFilename = outputplot + '.png'
lab.savefig(plotFilename)
f = open(outputplot, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
|
gpl-2.0
| -4,066,943,660,711,654,000
| 34.373626
| 75
| 0.558559
| false
| 4.379592
| false
| false
| false
|
Kelfast/mamba-framework
|
mamba/web/page.py
|
1
|
10483
|
# -*- test-case-name: mamba.test.test_web -*-
# Copyright (c) 2012 - 2013 Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""
.. module: page
:platform: Unix, Windows
:synopsis: The Page object is the main web application entry point
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
from singledispatch import singledispatch
from twisted.web import static, server
from twisted.python import filepath
from twisted.python import log as twisted_log
from twisted.python.logfile import DailyLogFile
from mamba.utils.less import LessResource
from mamba.utils import log
from mamba.core import templating, resource
os = filepath.os
class Page(resource.Resource):
"""
This represents a full web page in mamba applications. It's usually
the root page of your web site/application.
    The controllers for the routing system are registered here. We register
    any shared package controllers first so that they are overwritten if our
    application defines the same routes.
:param app: The Mamba Application that implements this page
:type app: :class:`~mamba.application.app.Application`
:param template_paths: additional template paths for resources
:param cache_size: the cache size for Jinja2 Templating system
:param loader: Jinja2 custom templating loader
"""
def __init__(self, app, template_paths=None, cache_size=50, loader=None):
resource.Resource.__init__(self)
# register log file if any
if (app.development is False and
app.already_logging is False and app.log_file is not None):
twisted_log.startLogging(DailyLogFile.fromFullPath(app.log_file))
self._assets = resource.Assets([os.getcwd() + '/static'])
self.template_paths = [
'application/view/templates',
'{}/templates/jinja'.format(
os.path.dirname(__file__).rsplit(os.sep, 1)[0]
)
]
# set managers
self._controllers_manager = app.managers.get('controller')
self._shared_controllers_manager = app.managers.get('packages')
# register controllers
self.register_shared_controllers()
self.register_controllers()
# containers
self.containers = {
'styles': static.Data('', 'text/css'),
'scripts': static.Data('', 'text/javascript')
}
# register containers
self.putChild('styles', self.containers['styles'])
self.putChild('scripts', self.containers['scripts'])
# insert stylesheets and scripts
self.insert_stylesheets()
self.insert_scripts()
# register service ponger
self.putChild('_mamba_pong', static.Data('PONG', 'text/plain'))
# static accessible data (scripts, css, images, and others)
self.putChild('assets', self._assets)
# other initializations
self.generate_dispatches()
self.initialize_templating_system(template_paths, cache_size, loader)
def getChild(self, path, request):
"""
        If path is an empty string or 'index', render_GET should be called;
        if not, we just look at the templates loaded from the view templates
        directory. If we find a template with the same name as the path,
        then we render that template.
.. caution::
            If there is a controller registered under the same path as the
            path parameter, it will be hidden and the template in the
            templates path will be rendered instead
:param path: the path
:type path: str
:param request: the Twisted request object
"""
if path == '' or path is None or path == 'index':
return self
for template in self.environment.list_templates():
if path == template.rsplit('.', 1)[0]:
return self
return resource.Resource.getChild(self, path, request)
def render_GET(self, request):
"""Renders the index page or other templates of templates directory
"""
if not request.prepath[0].endswith('.html'):
request.prepath[0] += '.html'
try:
template = templating.Template(
self.environment, template=request.prepath[0]
)
return template.render(**self.render_keys).encode('utf-8')
except templating.TemplateNotFound:
try:
template = templating.Template(
self.environment, template='index.html'
)
return template.render(**self.render_keys).encode('utf-8')
except templating.TemplateNotFound:
pass
template = templating.Template(
self.environment,
template='root_page.html'
)
return template.render(**self.render_keys).encode('utf-8')
def generate_dispatches(self):
"""Generate singledispatches
"""
self.add_template_paths = singledispatch(self.add_template_paths)
self.add_template_paths.register(str, self._add_template_paths_str)
self.add_template_paths.register(list, self._add_template_paths_list)
self.add_template_paths.register(tuple, self._add_template_paths_tuple)
def add_script(self, script):
"""Adds a script to the page
"""
self.putChild(script.prefix, static.File(script.path))
def register_controllers(self):
"""Add a child for each controller in the ControllerManager
"""
for controller in self._controllers_manager.get_controllers().values():
self._register_controller_module(controller)
self._build_controllers_tree()
def register_shared_controllers(self):
"""
Add a child for each shared package controller. If the package
includes a static files directory we add an asset for it
.. versionadded:: 0.3.6
"""
if self._shared_controllers_manager is None:
return
for package in self._shared_controllers_manager.packages.values():
static_data = filepath.FilePath(
'{}/static'.format(os.path.normpath(package['path']))
)
if static_data.exists():
self._assets.add_paths([static_data.path])
real_manager = package.get('controller')
if real_manager is None:
continue
for controller in real_manager.get_controllers().values():
self._register_controller_module(controller, True)
real_manager.build_controller_tree()
def initialize_templating_system(self, template_paths, cache_size, loader):
"""Initialize the Jinja2 templating system for static HTML resources
"""
if self._shared_controllers_manager is not None:
for package in self._shared_controllers_manager.packages.values():
self.add_template_paths('{}/view/templates'.format(
package.get('path'))
)
if template_paths is not None:
self.add_template_paths(template_paths)
if loader is None:
loader = templating.FileSystemLoader
self.environment = templating.Environment(
autoescape=lambda name: (
name.rsplit('.', 1)[1] == 'html' if name is not None else False
),
cache_size=cache_size,
loader=loader(self.template_paths)
)
def insert_stylesheets(self):
"""Insert stylesheets into the HTML
"""
for name, style in self._styles_manager.get_styles().iteritems():
if style.less:
self.containers['styles'].putChild(
name, LessResource(style.path)
)
continue
self.containers['styles'].putChild(name, static.File(style.path))
def insert_scripts(self):
"""Insert scripts to the HTML
"""
for name, script in self._scripts_manager.get_scripts().iteritems():
self.containers['scripts'].putChild(name, static.File(script.path))
def run(self, port=8080):
"""
Method to run the application within Twisted reactor
This method exists for testing purposes only and fast
controller test-development-test workflow. In production you
should use twistd
:param port: the port to listen
:type port: number
"""
from twisted.internet import reactor
factory = server.Site(self)
reactor.listenTCP(port, factory)
reactor.run()
def add_template_paths(self, paths):
"""Add template paths to the underlying Jinja2 templating system
"""
raise RuntimeError(
'{} type for paths can not be handled'.format(type(paths)))
def _add_template_paths_str(self, paths):
"""Append template paths for single str template path given
"""
self.template_paths.append(paths)
def _add_template_paths_list(self, paths):
"""Adds the given template paths list
"""
self.template_paths += paths
def _add_template_paths_tuple(self, paths):
"""Adds the given template paths tuple
"""
self.template_paths += list(paths)
def _register_controller_module(self, controller, shared=False):
"""Efectively register the controller in the routing system
:param controller: the controller to be registered
:type controller: :class:`mamba.application.controller.Controller`
        :param shared: is this a shared controller?
:type shared: bool
"""
log.info(
'Registering {} controller {} with route {} {}({})'.format(
'shared' if shared else '',
controller.get('object').name,
controller.get('object').get_register_path(),
controller.get('object').name,
controller.get('module')
)
)
if controller.get('object').__parent__ is None:
self.putChild(
controller.get('object').get_register_path(),
controller.get('object')
)
def _build_controllers_tree(self):
"""Build the full controllers tree
"""
self._controllers_manager.build_controller_tree()
__all__ = ['Page']
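# A minimal usage sketch (development only, as noted in Page.run); `app` is
# assumed to be a mamba Application instance and is not constructed here:
#
#   from mamba.web.page import Page
#
#   page = Page(app, template_paths=['application/view/extra'])
#   page.run(port=8080)  # production deployments should use twistd instead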
|
gpl-3.0
| -775,358,922,251,888,300
| 32.707395
| 79
| 0.610417
| false
| 4.522433
| true
| false
| false
|
geonition/base_page
|
base_page/admin.py
|
1
|
1467
|
"""
Admin classes for base_page related models
"""
from django.contrib.gis import admin
from django.core.urlresolvers import reverse_lazy
from base_page.forms import OrganizationSettingForm
from base_page.models import Feedback
from base_page.models import OrganizationSetting
from django.conf import settings
from modeltranslation.admin import TranslationAdmin
class OrganizationSettingAdmin(TranslationAdmin, admin.OSMGeoAdmin):
"""
The OrganizationSettingAdmin handles the organization specific settings
for the site.
"""
list_display = ('organization_name',
'title',
'blurb',
'provider',)
default_lon = getattr(settings,
'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
{'default_lon': 0})['default_lon']
default_lat = getattr(settings,
'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
{'default_lat': 0})['default_lat']
default_zoom = getattr(settings,
'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
{'default_zoom': 4})['default_zoom']
form = OrganizationSettingForm
openlayers_url = '%s%s' % (getattr(settings, 'STATIC_URL', '/'), 'js/libs/OpenLayers.js')
extra_js = (reverse_lazy('osmextra'),)
admin.site.register(Feedback)
admin.site.register(OrganizationSetting, OrganizationSettingAdmin)
|
mit
| 6,753,025,144,731,848,000
| 37.605263
| 93
| 0.632584
| false
| 4.486239
| false
| false
| false
|