text stringlengths 0 27.1M | meta dict |
|---|---|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def normal_distribution(mean=0, sigma=1, samples=1):
    """Draw `samples` values from a Gaussian with the given mean and std-dev."""
    return np.random.normal(loc=mean, scale=sigma, size=samples)
def get_cumulative_data(data, func):
    """Apply `func` to every growing prefix of `data`.

    Returns [func(data[:1]), func(data[:2]), ..., func(data)].

    Fixed: the original iterated range(1, len(data) - 1), which silently
    dropped the statistics for the last two prefixes, so cumulative plots
    never reached the full-sample value.
    """
    return [func(data[:i]) for i in range(1, len(data) + 1)]
def get_cumulative_mean_and_variance(data):
    """Return (running means, running variances) over growing prefixes of `data`."""
    running_means = get_cumulative_data(data, np.mean)
    running_variances = get_cumulative_data(data, np.var)
    return running_means, running_variances
def visualize_gaussian_distributions(means, sigmas, samples):
    """Plot the empirical distribution of a sum of independent Gaussians.

    Draws `samples` points from each N(means[i], sigmas[i]**2), sums them
    element-wise, displays the resulting histogram/KDE, then plots how the
    running mean and variance converge to the theoretical values
    (sum of means, sum of squared sigmas) as the sample count grows.
    """
    data = np.zeros(shape=(len(means), samples))
    for i in range(len(means)):
        data[i, :] = normal_distribution(means[i], sigmas[i], samples)
    # A sum of independent Gaussians is Gaussian with mean = sum(means)
    # and variance = sum(sigmas**2).
    data = np.sum(data, axis=0)
    # NOTE(review): sns.distplot is deprecated in recent seaborn releases
    # (histplot/displot replace it) — confirm the installed version.
    ax_0 = sns.distplot(data)
    ax_0.set_title('Gaussian Distribution with parameters: mean = {}, sigma = {}'.format(means, sigmas))
    ax_0.set_xlabel('x')
    ax_0.set_ylabel('y')
    plt.show()
    cumulative_means, cumulative_variances = get_cumulative_mean_and_variance(data)
    f, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, sharey=True)
    ax1.set_title('Mean depends on samples.')
    ax1.set_xlabel('Number of samples')
    ax1.set_ylabel('Mean')
    # Theoretical mean of the summed distribution.
    ax1.axhline(y=np.sum(means), color='red')
    ax1.plot(cumulative_means)
    ax2.set_title('Variance depends on samples.')
    ax2.set_xlabel('Number of samples')
    ax2.set_ylabel('Variance')
    ax2.plot(cumulative_variances)
    # Theoretical variance: sum of the individual variances (sigma_i ** 2).
    ax2.axhline(y=np.sum([x ** 2 for x in sigmas]), color='red')
    plt.show()
# Demo: sum of four Gaussians. The resulting distribution has mean
# 0+2+0+4 = 6 and variance 2**2 + 0.5**2 + 1**2 + 12**2 = 149.25.
visualize_gaussian_distributions(means=[0,2,0, 4], sigmas=[2, 0.5, 1,12], samples=5000)
# visualize_gaussian_distributions(means=[5, 10], sigmas=[1, 1], samples=5000)
# visualize_gaussian_distributions(means=[5, 10], sigmas=[1, 1], samples=5000)
| {
"alphanum_fraction": 0.6801385681,
"author": null,
"avg_line_length": 33.3076923077,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7ca5d9a739bf6d17a5eca4ad296d0ad202ffe9c1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c835a1bdf7ab1b2e58bcf90ae02b7405c9c72977",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "PiotrGrzybowski/ProbabilisticMachineLearning",
"max_forks_repo_path": "Lab2/task1.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c835a1bdf7ab1b2e58bcf90ae02b7405c9c72977",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "PiotrGrzybowski/ProbabilisticMachineLearning",
"max_issues_repo_path": "Lab2/task1.py",
"max_line_length": 105,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c835a1bdf7ab1b2e58bcf90ae02b7405c9c72977",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "PiotrGrzybowski/ProbabilisticMachineLearning",
"max_stars_repo_path": "Lab2/task1.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 485,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1732
} |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 14:19:14 2013
@author: mlawson
Wind and water velocity distributions
Classes:
Distributions - Base class that allows flow distribution PDFs and CDFs to be plotted
plotpdf - function to plot the probability distribution function
u: bin values
p: probability
plotcdf - function to plot the cumulative distribution
u: bin values
p: probability
Raleigh - Rayleigh flow distribution
Weibull - Weibull flow distribution
PowerLaw - function to calculate simple velocity profiles from a power law
"""
from pylab import *
from scipy.special import gamma as gam
interactive(True)
class DistributionPlots(object):
    """Mixin providing PDF/CDF plots for a fitted flow distribution.

    Subclasses must set self.u (bin/speed values), self.p (PDF values),
    self.F (CDF values) and self.distType (name used in plot titles).
    """
    def plotcdf(self):
        # Cumulative distribution function vs. wind speed.
        figure('CDF')
        plot(self.u,self.F)
        xlabel('Wind Speed')
        ylabel('Cumulative Distribution Function')
        title(self.distType + ' Distribution')
    def plotpdf(self):
        # Probability density function vs. wind speed.
        figure('PDF')
        plot(self.u,self.p)
        xlabel('Wind Speed')
        ylabel('Probability Distribution Function')
        title(self.distType + ' Distribution')
class Raleigh(DistributionPlots):
    # NOTE(review): the class name is a misspelling of "Rayleigh"; it is kept
    # unchanged for backward compatibility with existing callers.
    def __init__(self,uBar=None,u=None):
        """Rayleigh distribution parameterized by its mean speed uBar.

        u: array of speeds at which to evaluate the PDF/CDF.
        """
        self.uBar = uBar
        self.distType = 'Raleigh'
        self.u = u
        # PDF/CDF of the Rayleigh distribution written in terms of the mean
        # speed uBar.  Note ** binds tighter than /, so the first factor is
        # u / uBar**2 as intended.
        self.p = pi/2.0 * (self.u/self.uBar**2.0) * exp(-pi/4.0*(self.u/self.uBar)**2)
        self.F = 1-exp(-pi/4.0*(self.u/self.uBar)**2)
class Weibull(DistributionPlots):
    """Weibull flow distribution parameterized by shape k and mean speed uBar."""
    def __init__(self,k=2.0,uBar=1.0,u=1.0):
        self.distType = 'Weibull'
        self.k = k
        self.u = u
        self.uBar = uBar
        # Scale parameter c recovered from the mean: uBar = c * gamma(1 + 1/k).
        self.c = uBar/math.gamma(1.0+1.0/self.k)
        self.p = (self.k/self.c)*(self.u/self.c)**(self.k-1.0)*exp(-(self.u/self.c)**self.k)
        self.F = 1.0-exp(-(self.u/self.c)**self.k)
    def update(self,k=None,uBar=None,u=None):
        """Update any of k, uBar, u and recompute c, p and F.

        Fixed: the original crossed its assignments — the `uBar` guard
        assigned self.u and the `u` guard assigned self.uBar — so updating
        either parameter stored the wrong (often None) value.
        """
        if k is not None:
            self.k = k
        if uBar is not None:
            self.uBar = uBar
        if u is not None:
            self.u = u
        self.c = self.uBar/math.gamma(1.0+1.0/self.k)
        self.p = (self.k/self.c)*(self.u/self.c)**(self.k-1.0)*exp(-(self.u/self.c)**self.k)
        self.F = 1.0-exp(-(self.u/self.c)**self.k)
class PowerLaw(object):
    """Simple power-law velocity profile u(z) = ur * (z / zr) ** alpha."""
    def __init__(self,alpha=1.0/7.0,zr=None,ur=None):
        # alpha: shear exponent (1/7 is the common atmospheric default);
        # (zr, ur): reference height and reference speed.
        self.alpha = alpha
        self.zr = zr
        self.ur = ur
    def calc_ux(self,zx=None):
        """Compute and store the velocity self.ux at height zx."""
        self.zx = zx
        self.ux = self.ur*(self.zx/self.zr)**self.alpha
    def calc_zx(self,ux=None):
        """Compute and store the height self.zx at which velocity equals ux."""
        self.ux = ux
        self.zx = self.zr*(self.ux/self.ur)**(1/self.alpha)
    def plot(self):
        """Plot the reference point and the last computed (ux, zx) point.

        Fixed: the original referenced bare names ur/zr/ux and the typo `xz`
        instead of the instance attributes, and misspelled the `label`
        keyword as `lavel` — every call raised NameError/TypeError.
        """
        fig_PowerLaw = figure('Power Law')
        plot(self.ur,self.zr,'xk',markersize=10,mew=5,label='reference point')
        plot(self.ux,self.zx,'xr',markersize=10,mew=5,label='computed point')
        legend()
if __name__ == '__main__':
    # Determine histogram, k factor and uBar given in "Wind Data Summary"
    # provided by AWS Truepower, LLC.
    u = linspace(0,30,100)
    # Fixed: the original called fd.Weibull, but no module `fd` is imported
    # anywhere in this file — Weibull is defined above.
    weibull = Weibull(k=2.09,uBar=7.3,u=u)
    weibull.plotpdf()
    weibull.plotcdf()
    # Plot the digitized data from "Wind Data Summary" provided by AWS Truepower, LLC
    #digitizedData_weib = loadtxt('/home/mlawson/Dropbox/NREL/projects/freshwaterFOA/windData/weibData.dat')
    #digitizedData_hist = loadtxt('/home/mlawson/Dropbox/NREL/projects/freshwaterFOA/windData/expData.dat')
    #plot(digitizedData_weib[:,0],digitizedData_weib[:,1])
    #plot(digitizedData_hist[:,0],digitizedData_hist[:,1])
| {
"alphanum_fraction": 0.6098176718,
"author": null,
"avg_line_length": 31.2719298246,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f5f1b1d6ac2076b735e245fbbbbd476417ab858d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "875ff607727ab37006d7b3cb793f1dd97c538d1b",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "lawsonro3/python_scripts",
"max_forks_repo_path": "python_scripts/flow_distributions/flow_distributions.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "875ff607727ab37006d7b3cb793f1dd97c538d1b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "lawsonro3/python_scripts",
"max_issues_repo_path": "python_scripts/flow_distributions/flow_distributions.py",
"max_line_length": 108,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "875ff607727ab37006d7b3cb793f1dd97c538d1b",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "lawsonro3/python_scripts",
"max_stars_repo_path": "python_scripts/flow_distributions/flow_distributions.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1011,
"path": null,
"reason": "from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3565
} |
# These tests are mostly copies of the flip_sign.jl tests for GreaterToLess
using Test
using MathOptInterface
const MOI = MathOptInterface
const MOIT = MathOptInterface.Test
const MOIU = MathOptInterface.Utilities
const MOIB = MathOptInterface.Bridges
include("../utilities.jl")
# Shared mock optimizer (records scripted solutions) and default test config
# used by all test sets below.
mock = MOIU.MockOptimizer(MOIU.UniversalFallback(MOIU.Model{Float64}()))
config = MOIT.TestConfig()
@testset "GreaterToInterval" begin
    # Bridge GreaterThan constraints into Interval constraints on the mock.
    bridged_mock = MOIB.Constraint.GreaterToInterval{Float64}(mock)
    MOIT.basic_constraint_tests(
        bridged_mock, config,
        include = [(F, S)
                   for F in [MOI.ScalarAffineFunction{Float64},
                             MOI.ScalarQuadraticFunction{Float64}]
                   for S in [MOI.GreaterThan{Float64}]])
    # Pre-scripted solutions the mock returns for linear6test's three solves.
    MOIU.set_mock_optimize!(mock,
        (mock::MOIU.MockOptimizer) -> MOIU.mock_optimize!(mock, [0.0, 0.0]),
        (mock::MOIU.MockOptimizer) -> MOIU.mock_optimize!(mock, [100.0, 0.0]),
        (mock::MOIU.MockOptimizer) -> MOIU.mock_optimize!(mock, [100.0, -100.0]))
    MOIT.linear6test(bridged_mock, config)
    ci = first(MOI.get(bridged_mock, MOI.ListOfConstraintIndices{MOI.ScalarAffineFunction{Float64}, MOI.GreaterThan{Float64}}()))
    # Primal/dual start values must round-trip through the bridge unchanged.
    @testset "$attr" for attr in [MOI.ConstraintPrimalStart(), MOI.ConstraintDualStart()]
        @test MOI.supports(bridged_mock, attr, typeof(ci))
        MOI.set(bridged_mock, attr, ci, 2.0)
        @test MOI.get(bridged_mock, attr, ci) ≈ 2.0
    end
    # Deleting the bridged constraint must also remove the underlying
    # Interval constraint it created.
    test_delete_bridge(bridged_mock, ci, 2,
                       ((MOI.ScalarAffineFunction{Float64},
                         MOI.Interval{Float64}, 0),
                        ))
end
@testset "LessToInterval" begin
    # Bridge LessThan constraints into Interval constraints on the mock.
    bridged_mock = MOIB.Constraint.LessToInterval{Float64}(mock)
    MOIT.basic_constraint_tests(
        bridged_mock, config,
        include = [(F, S)
                   for F in [MOI.SingleVariable, MOI.ScalarAffineFunction{Float64},
                             MOI.ScalarQuadraticFunction{Float64}]
                   for S in [MOI.LessThan{Float64}]])
    # Scripted primal/dual results for the set-modification test: the dual is
    # given for the bridged Interval constraint, not the original LessThan.
    MOIU.set_mock_optimize!(mock,
        (mock::MOIU.MockOptimizer) -> MOIU.mock_optimize!(mock,
            MOI.OPTIMAL, (MOI.FEASIBLE_POINT, [1.0]),
            MOI.FEASIBLE_POINT,
            (MOI.ScalarAffineFunction{Float64}, MOI.Interval{Float64}) => [-1.0]
        ),
        (mock::MOIU.MockOptimizer) -> MOIU.mock_optimize!(mock,
            MOI.OPTIMAL, (MOI.FEASIBLE_POINT, [2.0]),
            MOI.FEASIBLE_POINT,
            (MOI.ScalarAffineFunction{Float64}, MOI.Interval{Float64}) => [-1.0]
        )
    )
    MOIT.solve_set_scalaraffine_lessthan(bridged_mock, config)
    # Scripted results for the coefficient-modification test.
    MOIU.set_mock_optimize!(mock,
        (mock::MOIU.MockOptimizer) -> MOIU.mock_optimize!(mock,
            MOI.OPTIMAL, (MOI.FEASIBLE_POINT, [1.0]),
            MOI.FEASIBLE_POINT,
            (MOI.ScalarAffineFunction{Float64}, MOI.Interval{Float64}) => [-1.0]
        ),
        (mock::MOIU.MockOptimizer) -> MOIU.mock_optimize!(mock,
            MOI.OPTIMAL, (MOI.FEASIBLE_POINT, [0.5]),
            MOI.FEASIBLE_POINT,
            (MOI.ScalarAffineFunction{Float64}, MOI.Interval{Float64}) => [-0.5]
        )
    )
    MOIT.solve_coef_scalaraffine_lessthan(bridged_mock, config)
    ci = first(MOI.get(bridged_mock, MOI.ListOfConstraintIndices{MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64}}()))
    # Primal/dual start values must round-trip through the bridge unchanged.
    @testset "$attr" for attr in [MOI.ConstraintPrimalStart(), MOI.ConstraintDualStart()]
        @test MOI.supports(bridged_mock, attr, typeof(ci))
        MOI.set(bridged_mock, attr, ci, 2.0)
        @test MOI.get(bridged_mock, attr, ci) ≈ 2.0
    end
    # Deleting the bridged constraint must also remove the underlying
    # Interval constraint it created.
    test_delete_bridge(bridged_mock, ci, 1,
                       ((MOI.ScalarAffineFunction{Float64},
                         MOI.Interval{Float64}, 0),))
end
# Define a dummy optimizer that only supports intervals
# and use it in the below unmocked test
mutable struct Optimizer <: MOI.AbstractOptimizer
    function Optimizer()
        return new()
    end
end
# The dummy solver advertises a name and support for exactly one constraint
# type: ScalarAffineFunction-in-Interval.
MOI.get(model::Optimizer, ::MOI.SolverName) = "OnlyIntervalOptimizer"
MOI.supports_constraint(::Optimizer, ::Type{MOI.ScalarAffineFunction{Float64}}, ::Type{MOI.Interval{Float64}}) = true
@testset "GreaterOrLessToInterval_unmocked" begin
    # model supports Interval but not LessThan or GreaterThan
    model = Optimizer()
    @test MOI.supports_constraint(model, MOI.ScalarAffineFunction{Float64}, MOI.Interval{Float64})
    @test !MOI.supports_constraint(model, MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64})
    @test !MOI.supports_constraint(model, MOI.ScalarAffineFunction{Float64}, MOI.GreaterThan{Float64})
    # Stacking both bridges by hand makes all three constraint types supported.
    bridged = MOIB.Constraint.GreaterToInterval{Float64}(MOIB.Constraint.LessToInterval{Float64}(model))
    @test MOI.supports_constraint(bridged, MOI.ScalarAffineFunction{Float64}, MOI.Interval{Float64})
    @test MOI.supports_constraint(bridged, MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64})
    @test MOI.supports_constraint(bridged, MOI.ScalarAffineFunction{Float64}, MOI.GreaterThan{Float64})
    # full_bridge_optimizer must discover the same bridges automatically.
    bridged2 = MOIB.full_bridge_optimizer(model, Float64)
    @test MOI.supports_constraint(bridged2, MOI.ScalarAffineFunction{Float64}, MOI.Interval{Float64})
    @test MOI.supports_constraint(bridged2, MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64})
    @test MOI.supports_constraint(bridged2, MOI.ScalarAffineFunction{Float64}, MOI.GreaterThan{Float64})
end
| {
"alphanum_fraction": 0.6782592862,
"author": null,
"avg_line_length": 41.9236641221,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "ea7f0cd8d34720491f6631e35bbe9bbdc8437416",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "08a3639cf91a7b75e99534c5070f5d6290f61005",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "amburosesekar/MathOptInterface.jl",
"max_forks_repo_path": "test/Bridges/Constraint/ltgt_to_interval.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "08a3639cf91a7b75e99534c5070f5d6290f61005",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "amburosesekar/MathOptInterface.jl",
"max_issues_repo_path": "test/Bridges/Constraint/ltgt_to_interval.jl",
"max_line_length": 129,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "08a3639cf91a7b75e99534c5070f5d6290f61005",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "amburosesekar/MathOptInterface.jl",
"max_stars_repo_path": "test/Bridges/Constraint/ltgt_to_interval.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1577,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5492
} |
# Compute the 2nd Poincaré invariant of the 4D guiding-centre system on a
# tokamak fast surface with variational partitioned Runge-Kutta (VPRK)
# integrators at time step Δt = 10.
using GeometricIntegrators
using ChargedParticleDynamics.GuidingCenter4d.TokamakFastSurface
using GeometricExamples
const Δt = 10.       # integrator time step
const ntime = 2500   # number of time steps
const nx = 200       # grid points, first direction (presumably the surface grid — confirm)
const ny = 200       # grid points, second direction
const nsave = 5      # save interval (every nsave-th step)
const nplot = 5      # plot interval
const run_id = "poincare_2nd_fast_vprk_dt10"   # identifier used for output files
include("guiding_center_4d_settings_vprk.jl")
tableau_list = get_tableau_list_vprk_projection()
pinv = guiding_center_4d_iode_poincare_invariant_2nd(Δt, nx, ny, ntime, nsave)
include("guiding_center_4d_fast_poincare_invariant_2nd.jl")
| {
"alphanum_fraction": 0.8070175439,
"author": null,
"avg_line_length": 24.4285714286,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "35e14875184175a031b22f31eeda58aedeef2a45",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ff6d5a27e2f14dedaad0d45a1e53fe953b0a4519",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DDMGNI/GeometricExamples.jl",
"max_forks_repo_path": "examples/guiding_center_4d/poincare_invariant_2nd/guiding_center_4d_fast_poincare_invariant_2nd_vprk_dt10.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ff6d5a27e2f14dedaad0d45a1e53fe953b0a4519",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DDMGNI/GeometricExamples.jl",
"max_issues_repo_path": "examples/guiding_center_4d/poincare_invariant_2nd/guiding_center_4d_fast_poincare_invariant_2nd_vprk_dt10.jl",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ff6d5a27e2f14dedaad0d45a1e53fe953b0a4519",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DDMGNI/GeometricExamples.jl",
"max_stars_repo_path": "examples/guiding_center_4d/poincare_invariant_2nd/guiding_center_4d_fast_poincare_invariant_2nd_vprk_dt10.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 177,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 513
} |
#!/usr/bin/env python
# Downsampling is done on shifted anomaly and maps
import pathlib
import numpy as np
import glob
import parse
from tqdm import tqdm # for progress bars
# Directory holding the converted .npy inputs; downsampled outputs are
# written back into the same directory.
converted_data_dir = pathlib.Path('input_data')
master_mask_2D_shifted = np.load(converted_data_dir / "master_mask_2D_shifted-v2.npy")
evaluation_mask_3D_shifted = np.load(converted_data_dir / "evaluation_mask_3D_shifted-v2.npy")
(duration, height, width) = evaluation_mask_3D_shifted.shape
# Integer factor for downsampling
factor = 3 # change this and rerun, file names are uniquely generated
# Both dimensions must divide evenly so factor x factor tiles tile exactly.
assert(width % factor == 0)
new_width = width // factor
assert(height % factor == 0)
new_height = height // factor
# Downsampling master mask: each output cell stores the fraction of set
# cells in the corresponding factor x factor tile (a value in [0, 1]).
master_mask_2D_shifted_ds = np.zeros((new_height, new_width), dtype=np.float32)
for row in range(new_height):
    for col in range(new_width):
        master_mask_2D_shifted_ds.itemset((row, col),
            master_mask_2D_shifted[row*factor:(row+1)*factor,col*factor:(col+1)*factor].astype(np.float32).sum() / (factor**2))
fname = "master_mask_2D_shifted-v2_ds_" + str(factor) + "x" + str(factor)
np.save(converted_data_dir / fname, master_mask_2D_shifted_ds)
nan_mask_3D_shifted = np.load(converted_data_dir / "nan_mask_3D_shifted-v2.npy")
imputed_anom_3D_shifted = np.load(converted_data_dir / "imputed_anom_3D_shifted-v2.npy")
# Downsampling evaluation mask: per time step, store the tile coverage
# fraction, and average the anomaly over tiles that have any coverage.
evaluation_mask_3D_shifted_ds = np.zeros((duration, new_height, new_width), dtype=np.float32)
imputed_anom_3D_shifted_ds_on_em = np.zeros((duration, new_height, new_width), dtype=np.float32)
print("Downsampling evaluation data...")
for t, eval_mask_shifted in tqdm(enumerate(evaluation_mask_3D_shifted), total=duration):
    # NOTE(review): `.all()==False` only asserts that not EVERY cell violates
    # the master mask; `.any()==False` (no violations at all) was probably
    # intended — confirm before relying on this invariant.
    assert((eval_mask_shifted & ~master_mask_2D_shifted).all()==False)
    for row in range(new_height):
        for col in range(new_width):
            num_of_cells = eval_mask_shifted[row*factor:(row+1)*factor, col*factor:(col+1)*factor].sum()
            evaluation_mask_3D_shifted_ds.itemset((t, row, col), num_of_cells / (factor ** 2))
            if(num_of_cells > 0):
                # NOTE(review): sums the anomaly over the WHOLE tile but
                # divides by the masked-cell count — correct only if the
                # anomaly is zero outside the mask; confirm.
                imputed_anom_3D_shifted_ds_on_em.itemset((t, row, col),
                    imputed_anom_3D_shifted[t, row*factor:(row+1)*factor, col*factor:(col+1)*factor].sum() /
                    num_of_cells)
fname = converted_data_dir / ("evaluation_mask_3D_shifted-v2_ds_" + str(factor) + "x" + str(factor))
np.save(fname, evaluation_mask_3D_shifted_ds)
fname = converted_data_dir / ("imputed_anom_3D_shifted-v2_ds_" + str(factor) + "x" + str(factor))
np.save(fname, imputed_anom_3D_shifted_ds_on_em)
# Downsampling nan mask: same tile-coverage-fraction scheme as above.
nan_mask_3D_shifted_ds = np.zeros((duration, new_height, new_width), dtype=np.float32)
print("Downsampling nan mask...")
for t, nan_mask_shifted in tqdm(enumerate(nan_mask_3D_shifted), total=duration):
    assert((nan_mask_shifted & ~master_mask_2D_shifted).all()==False)
    for row in range(new_height):
        for col in range(new_width):
            num_of_cells = nan_mask_shifted[row*factor:(row+1)*factor, col*factor:(col+1)*factor].sum()
            nan_mask_3D_shifted_ds.itemset((t, row, col), num_of_cells / (factor ** 2))
fname = "nan_mask_3D_shifted-v2_ds_" + str(factor) + "x" + str(factor)
np.save(converted_data_dir / fname, nan_mask_3D_shifted_ds)
# Downsampling multiple training and validation masks.  Each stored training
# mask produces a downsampled training mask plus a derived validation mask
# (evaluation minus training), with anomalies averaged on each.
print("Downsampling multiple training and validation masks...")
base_training_mask_file_name = "training_mask_3D_shifted-v2"
for training_mask_file_name in glob.glob(str(converted_data_dir / (base_training_mask_file_name + "*"))):
    training_mask_pathlib_name = pathlib.Path(training_mask_file_name).stem
    # Skips training masks that are already downsampled: only names matching
    # the base-{N1}-{N2} pattern are original masks.
    if parse.parse(base_training_mask_file_name + "-{N1:g}-{N2:g}", training_mask_pathlib_name) == None :
        continue
    print(" Working on ", training_mask_pathlib_name, "...")
    training_mask_3D_shifted = np.load(training_mask_file_name)
    # Downsampling training mask (same tile-fraction scheme as the masks above).
    training_mask_3D_shifted_ds = np.zeros((duration, new_height, new_width), dtype=np.float32)
    imputed_anom_3D_shifted_ds_on_tm = np.zeros((duration, new_height, new_width), dtype=np.float32)
    for t, train_mask_shifted in tqdm(enumerate(training_mask_3D_shifted), total=duration):
        # NOTE(review): `.all()==False` — see the equivalent assertion above;
        # `.any()==False` was probably intended.
        assert((train_mask_shifted & ~master_mask_2D_shifted).all()==False)
        for row in range(new_height):
            for col in range(new_width):
                num_of_cells = train_mask_shifted[row*factor:(row+1)*factor, col*factor:(col+1)*factor].sum()
                training_mask_3D_shifted_ds.itemset((t, row, col), num_of_cells / (factor ** 2))
                if(num_of_cells > 0):
                    imputed_anom_3D_shifted_ds_on_tm.itemset((t, row, col),
                        imputed_anom_3D_shifted[t, row*factor:(row+1)*factor, col*factor:(col+1)*factor].sum() /
                        num_of_cells)
    assert not np.isnan(imputed_anom_3D_shifted_ds_on_tm).any()
    fname = converted_data_dir / (str(training_mask_pathlib_name) + "_ds_" + str(factor) + "x" + str(factor))
    np.save(fname, training_mask_3D_shifted_ds)
    # Downsampling validation mask: cells evaluated but NOT used for training.
    validation_mask_3D_shifted_ds = np.zeros((duration, new_height, new_width), dtype=np.float32)
    imputed_anom_3D_shifted_ds_on_vm = np.zeros((duration, new_height, new_width), dtype=np.float32)
    for t, valid_mask_shifted in tqdm(enumerate(evaluation_mask_3D_shifted & ~training_mask_3D_shifted), total=duration):
        assert((valid_mask_shifted & ~master_mask_2D_shifted).all()==False)
        for row in range(new_height):
            for col in range(new_width):
                num_of_cells = valid_mask_shifted[row*factor:(row+1)*factor, col*factor:(col+1)*factor].sum()
                validation_mask_3D_shifted_ds.itemset((t, row, col), num_of_cells / (factor ** 2))
                if(num_of_cells > 0):
                    imputed_anom_3D_shifted_ds_on_vm.itemset((t, row, col),
                        imputed_anom_3D_shifted[t, row*factor:(row+1)*factor, col*factor:(col+1)*factor].sum() /
                        num_of_cells)
    assert not np.isnan(imputed_anom_3D_shifted_ds_on_vm).any()
    fname = converted_data_dir / ("validation_mask_3D_shifted-v2_ds_" + str(factor) + "x" + str(factor) + "_from_" + str(training_mask_pathlib_name))
    np.save(fname, validation_mask_3D_shifted_ds)
| {
"alphanum_fraction": 0.7062625019,
"author": null,
"avg_line_length": 50.3798449612,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5a6e7c7de216aa8cba838995189952de485317e6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-05-03T21:34:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-05-03T21:34:24.000Z",
"max_forks_repo_head_hexsha": "74b9038694f8e221bef3c26e898aa25e2baa6497",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "BlackBox-EVA2019/BlackBox",
"max_forks_repo_path": "downsample_bitmaps.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "74b9038694f8e221bef3c26e898aa25e2baa6497",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "BlackBox-EVA2019/BlackBox",
"max_issues_repo_path": "downsample_bitmaps.py",
"max_line_length": 149,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "74b9038694f8e221bef3c26e898aa25e2baa6497",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "BlackBox-EVA2019/BlackBox",
"max_stars_repo_path": "downsample_bitmaps.py",
"max_stars_repo_stars_event_max_datetime": "2020-10-31T08:10:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-31T08:10:14.000Z",
"num_tokens": 1751,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6499
} |
# Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this module will be included in the api
"""
import xarray as xa
import numpy as np
from cngi.image import imageUtility as iu
xa.set_options(keep_attrs=True)
def moments(ds, **kwargs):
    """
    Collapse an n-dimensional image cube into a moment by taking a linear combination of individual planes

    .. note::
        This implementation still needs to implement additional moment codes, and verify behavior of implemented moment codes.

    Parameters
    ----------
    ds : xarray.core.dataset.Dataset
        input Image Dataset
    axis : str, optional
        specified axis along which to reduce for moment generation, Default='chan'
    moment : int or list of int, optional
        number(s) selecting which moment(s) to calculate from the following list
        -1 - mean value of the spectrum (default)
        0 - integrated value of the spectrum
        1 - intensity weighted coordinate; traditionally used to get 'velocity fields'
        2 - intensity weighted dispersion of the coordinate; traditionally used to get 'velocity dispersion'
        3 - median of I
        4 - median coordinate
        5 - standard deviation about the mean of the spectrum
        6 - root mean square of the spectrum
        7 - absolute mean deviation of the spectrum
        8 - maximum value of the spectrum
        9 - coordinate of the maximum value of the spectrum
        10 - minimum value of the spectrum
        11 - coordinate of the minimum value of the spectrum
    chans : str, optional
        channels to use; defaults to all channels
    **kwargs
        Arbitrary keyword arguments

    Returns
    -------
    xarray.core.dataset.Dataset
        output Image with one MOMENTS_* data variable added per requested moment
    """
    # input parameter checking
    # moment: int array: a List of moments to compute
    if 'moment' in kwargs.keys():
        moment = kwargs['moment']
        assert np.min(moment) in range(-1,12), "Input to moment parameter must be between -1 and 11"
        assert np.max(moment) in range(-1,12)
    else:
        print("No valid input code detected, assuming default to calculate all image moments)")
        moment = np.arange(-1,12)
    # axis: string int: the moment axis: ra, dec, lat, long, spectral or stokes.
    if 'axis' in kwargs.keys():
        axis = kwargs['axis']
    else:
        print("No valid axis is specified, set default 'chan')")
        axis='chan'
    # chans: string, Channels to use. Default is to use all channels.
    if ('chans' in kwargs.keys()):
        chans = iu.selectedchannels(chans = kwargs['chans'],shapeLength = ds.dims['chan'])
    else:
        print("No channel is specified. Set to default -1 to use all channels")
        chans = np.arange(ds.dims['chan'])
    # This factor is related to the width (in world coordinate units) of a pixel along the moment axis
    # light speed in kilometer per second
    lightSpeed = 2.99792458*pow(10,5)
    f0 = float(ds.attrs['rest_frequency'].replace('hz', ''))
    # Velocity of each channel relative to the rest frequency, in km/s.
    v = (1 - ds.coords[axis] / f0) * lightSpeed
    # Channel width in velocity units; assumes a uniformly spaced axis.
    deltaV = (ds.coords[axis].values[1]-ds.coords[axis].values[0])*lightSpeed / f0
    intensity=ds.IMAGE[:,:,chans,:]
    # moments calculation
    if -1 in moment or 0 in moment or 1 in moment or 2 in moment:
        #ds["MOMENTS_AVERAGE"]=intensity.mean(dim=axis)
        ds["MOMENTS_AVERAGE"] = intensity.sum(dim=axis) / intensity.shape[2]
        # NOTE: "MOMENTS_INTERGRATED" is a historical misspelling, kept
        # unchanged for backward compatibility with existing consumers.
        ds["MOMENTS_INTERGRATED"]=intensity.sum(dim=axis)*deltaV
        sum1 = 0
        for i in range(intensity.shape[2]):
            sum1 += intensity[:, :, i, :] * v[i]
        ds["MOMENTS_WEIGHTED_COORD"] = sum1/ds["MOMENTS_INTERGRATED"]
        sum1=0
        for i in range(intensity.shape[2]):
            # Fixed: the original used plain assignment (sum1 = ...) here,
            # so only the last channel contributed to the dispersion.
            sum1 += intensity[:, :, i, :]*pow((v[i]-ds["MOMENTS_WEIGHTED_COORD"]),2)
        ds["MOMENTS_WEIGHTED_DISPERSION_COORD"] = np.sqrt(sum1 / ds["MOMENTS_INTERGRATED"])
    if 3 in moment:
        ds["MOMENTS_MEDIAN"] = intensity.median(dim=axis)
    if 4 in moment:
        # NOTE(review): np.quantile(..., .25) is the first quartile of the
        # flattened cube, not a per-pixel median coordinate — confirm intent.
        ds["MOMENTS_MEDIAN_COORD"] = np.quantile(intensity.values, .25)
    if 5 in moment:
        # Sample standard deviation (ddof=1) about the per-pixel mean.
        sd = pow((intensity - intensity.mean(dim=axis)),2)
        standarddeviation = np.sqrt(sd.sum(dim=axis) / (intensity.shape[2]-1))
        ds["MOMENTS_STANDARD_DEVIATION"] = standarddeviation
        # The default xarray.std returns large difference between casa6 and CNGI
        # ds["MOMENTS_STANDARD_DEVIATION"] = intensity.std(dim=axis)
    if 6 in moment:
        ds["MOMENTS_RMS"] = np.sqrt((np.fabs(intensity * intensity)).mean(dim=axis))
    if 7 in moment:
        sd = np.fabs((intensity-intensity.mean(dim=axis)))
        absmeandev = sd.mean(dim=axis)
        ds["MOMENTS_ABS_MEAN_DEV"] = absmeandev
    if 8 in moment:
        ds["MOMENTS_MAXIMUM"] = intensity.max(dim=axis)
    # moment-of-maximum coordinate, in km/s
    if 9 in moment:
        # Map each argmax channel index to its velocity coordinate.
        mcnparray = intensity.argmax(dim=axis).values.astype(np.float32)
        for i in range(intensity.shape[2]):
            mcnparray[mcnparray==i]=v[i]
        ds["MOMENTS_MAXIMUM_COORD"] = xa.DataArray(mcnparray,
                                                   coords=ds["MOMENTS_MAXIMUM"].coords,
                                                   dims=ds["MOMENTS_MAXIMUM"].dims)
    if 10 in moment:
        ds["MOMENTS_MINIMUM"] = intensity.min(dim=axis)
    # moment-of-minimum coordinate, in km/s
    if 11 in moment:
        mcnparray = intensity.argmin(dim=axis).values.astype(np.float32)
        for i in range(intensity.shape[2]):
            mcnparray[mcnparray == i] = v[i]
        # NOTE(review): coords/dims are taken from MOMENTS_MAXIMUM, which only
        # exists if moment 8 was also requested (KeyError otherwise). Kept
        # as-is to preserve behavior — confirm before requesting 11 alone.
        ds["MOMENTS_MINIMUM_COORD"] = xa.DataArray(mcnparray,
                                                   coords=ds["MOMENTS_MAXIMUM"].coords,
                                                   dims=ds["MOMENTS_MAXIMUM"].dims)
    return ds
| {
"alphanum_fraction": 0.635059761,
"author": null,
"avg_line_length": 40.2243589744,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f130f5fcdcef94fac4fa94fce15399d8cfbbf10f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "421a99c460f4092b79120f5bec122de7ce9b8b96",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "FedeMPouzols/cngi_prototype",
"max_forks_repo_path": "cngi/image/moments.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "421a99c460f4092b79120f5bec122de7ce9b8b96",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "FedeMPouzols/cngi_prototype",
"max_issues_repo_path": "cngi/image/moments.py",
"max_line_length": 126,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "421a99c460f4092b79120f5bec122de7ce9b8b96",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "FedeMPouzols/cngi_prototype",
"max_stars_repo_path": "cngi/image/moments.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1568,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6275
} |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
from collections import defaultdict
import dataclasses
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import numpy as np
from transformers import RobertaTokenizerFast
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed
)
import torch.nn as nn
from transformers import AutoModel, AutoConfig
from transformers import PreTrainedModel, BertPreTrainedModel
from transformers.modeling_outputs import QuestionAnsweringModelOutput
# from transformers import _BaseAutoModelClass
from torch.nn import MSELoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss
import torch
logger = logging.getLogger(__name__)
class ClassificationSent(nn.Module):
    """Sent for sentence-level classification tasks."""
    def __init__(self, config):
        super(ClassificationSent, self).__init__()
        # Standard classification head: dense -> tanh -> dropout -> labels.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, features, input_ids, labels=None):
        """Classify the hidden state at each sentence-separator position.

        features: encoder hidden states — assumed (batch, seq_len, hidden)
            from the indexing below; TODO confirm with the caller.
        input_ids: token ids; token id 2 marks separators and id 1 padding —
            presumably the RoBERTa vocabulary (</s>/<pad>), matching the
            RobertaTokenizerFast used elsewhere in this file; confirm.
        labels: optional per-sentence labels, sliced to the separator count.

        Returns (logits, pooled states, flattened sentence labels, sep_count)
        where sep_count maps each pooled row back to its batch index.
        """
        x = []
        sep_count =[]
        sent_labels= []
        for i, ids in enumerate(input_ids):
            sep = torch.where(ids==2)
            # Drop the final separator when it terminates the paragraph
            # (either last token, or followed by padding id 1).
            if (sep[0][-1]<ids.size(0)-1 and ids[sep[0][-1]+1] == 1) or sep[0][-1]==ids.size(0)-1: # if the last <sep> is the end of para graph
                x.append(features[i,sep[0][:-1],:])
                trip_sep = sep[0][:-1].size(0)
            else:
                x.append(features[i,sep[0],:])
                trip_sep = sep[0].size(0)
            sep_count.extend([i]*trip_sep)
            if labels is not None:
                sent_labels.append(labels[i,:trip_sep])
        if labels is not None:
            sent_labels = torch.cat(sent_labels, dim=0)
        sep_count = torch.tensor(sep_count)
        # Pool all separator states across the batch and classify each.
        x = torch.cat(x, dim=0)
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        y = self.out_proj(x)
        return y, x, sent_labels, sep_count
class JointQAModel(BertPreTrainedModel):
    """Joint paragraph-classification / sentence-classification / extractive-QA model.

    A shared transformer encoder (built via ``AutoModel.from_config``) feeds
    three heads:

    * ``para_classifier`` — paragraph-level score from the pooled output,
    * ``sent_classifier`` — per-sentence scores from separator-token features,
    * ``qa_outputs``      — answer start/end logits over all tokens.

    During training the losses of the enabled heads are summed; each component
    is also appended to ``self.losses`` for later inspection.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = AutoModel.from_config(config)
        # Paragraph head consumes the pooled ([CLS]) representation.
        self.para_classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.sent_classifier = ClassificationSent(config)
        # Tokenizer is only used for the human-readable debug prints below.
        self.tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base')
        # QA head: two logits per token — answer-span start and end.
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.use_sent_loss = config.use_sent_loss
        self.use_qa_loss = config.use_qa_loss
        print("use sentence loss in the training: ", self.use_sent_loss)
        print("use qa loss in the training: ", self.use_qa_loss)
        self.init_weights()
        # Running history of every loss component, keyed by name.
        self.losses = defaultdict(list)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        start_positions=None,
        end_positions=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
            When ``num_labels == 1``, ``labels[:, 0]`` is the paragraph target and
            ``labels[:, 1:]`` are the per-sentence targets.
        """
        # Tuple-style return is forced; the ModelOutput branch below is kept
        # for future return_dict support but is currently unreachable.
        # return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        return_dict = False
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # Paragraph head on the pooled representation.
        logits = self.para_classifier(outputs.pooler_output)
        # QA head: split the 2-channel per-token logits into start/end.
        qa_logits = self.qa_outputs(outputs.last_hidden_state)
        start_logits, end_logits = qa_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        # Debug: decode the currently most likely start/end token of sample 0.
        print('model answer start string = ' + str(self.tokenizer.decode([input_ids[0][torch.argmax(start_logits, dim=1)[0]]])))
        print('model answer end string = ' + str(self.tokenizer.decode([input_ids[0][torch.argmax(end_logits, dim=1)[0]]])))
        loss = None
        output = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression.
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels[:, 0].view(-1))
                self.losses['loss1'].append(loss.item())
                if self.use_sent_loss:
                    logits_sents, sents_features, sents_labels, sent_segment = self.sent_classifier(
                        outputs.last_hidden_state, input_ids, labels[:, 1:])
                    loss2 = loss_fct(logits_sents.view(-1), sents_labels.view(-1))
                    loss += loss2
                    self.losses['loss2'].append(loss2.item())
                if self.use_qa_loss:
                    if start_positions is not None and end_positions is not None:
                        # If we are on multi-GPU, split adds a dimension.
                        if len(start_positions.size()) > 1:
                            start_positions = start_positions.squeeze(-1)
                        if len(end_positions.size()) > 1:
                            end_positions = end_positions.squeeze(-1)
                        # Sometimes the start/end positions are outside our model
                        # inputs; clamp them and tell the loss to ignore that index.
                        ignored_index = start_logits.size(1)
                        loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
                        start_positions = start_positions.clamp(0, ignored_index)
                        end_positions = end_positions.clamp(0, ignored_index)
                        print("Start Position: ", self.tokenizer.decode([input_ids[0][start_positions[0]]]),
                              " End Position: ", self.tokenizer.decode([input_ids[0][end_positions[0]]]))
                        start_loss = loss_fct(start_logits, start_positions)
                        end_loss = loss_fct(end_logits, end_positions)
                        loss3 = (start_loss + end_loss) / 2
                        loss += loss3
                        self.losses['loss3'].append(loss3.item())
                    # NOTE(review): Total_Loss is only recorded when use_qa_loss
                    # is set — preserved from the original; confirm if intended.
                    self.losses['Total_Loss'].append(loss.item())
            else:
                loss_fct = CrossEntropyLoss()
                # Bug fix: previously referenced undefined name ``label``.
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        # NOTE(review): unreachable while return_dict is forced False above;
        # QuestionAnsweringModelOutput does not define a ``logits`` field, so
        # this call would need a custom output class before being enabled.
        return QuestionAnsweringModelOutput(
            loss=loss,
            logits=logits,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def predict(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        use_sent_loss=True,
        use_qa_loss=True
    ):
        r"""Run inference and return raw logits from all three heads.

        Returns a dict with paragraph logits, per-sentence logits (plus the
        batch-segment index and gathered features from the sentence head), and
        the QA start/end logits.
        """
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        # Consistency fix: forward() trains para_classifier on the pooled
        # output, so inference must use it too (previously the per-token
        # sequence output was classified, which the head was never trained on).
        logits = self.para_classifier(outputs.pooler_output)
        # NOTE(review): forward() passes labels[:, 1:] to the sentence head;
        # here the caller is expected to pass sentence-only labels (or None).
        logits_sents, sents_features, sents_labels, sent_segment = self.sent_classifier(
            sequence_output, input_ids, labels)
        # Bug fix: the QA head output must be named and split; the original
        # unpacked the raw head tensor and then referenced an undefined
        # ``qa_logits`` name (NameError at runtime).
        qa_logits = self.qa_outputs(outputs.last_hidden_state)
        start_logits, end_logits = qa_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        return {
            "para_logits": logits,
            "sent_logits": logits_sents,
            "sent_segment": sent_segment,
            "input_f": sents_features,
            "start_logits": start_logits,
            "end_logits": end_logits,
        }
| {
"alphanum_fraction": 0.6182216542,
"author": null,
"avg_line_length": 41.0262172285,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6577701ac6f8999ba66d3523669caa14b2bb3a8a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0bae13c8e5acb769f36520406d29ef5845bc5d82",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "drocobeth/joint_model",
"max_forks_repo_path": "model_train/model.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0bae13c8e5acb769f36520406d29ef5845bc5d82",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "drocobeth/joint_model",
"max_issues_repo_path": "model_train/model.py",
"max_line_length": 182,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0bae13c8e5acb769f36520406d29ef5845bc5d82",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "drocobeth/joint_model",
"max_stars_repo_path": "model_train/model.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2361,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10954
} |
# coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for audio."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import scipy.signal
import tensorflow.compat.v1 as tf
def add_delta_deltas(filterbanks, name=None):
  """Append first- and second-order time-derivative channels.

  Args:
    filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]
    name: scope name

  Returns:
    float32 tensor with shape [batch_size, len, num_bins, 3]
  """
  # 5-tap first-derivative kernel; convolving it with itself produces the
  # 9-tap second-derivative kernel.
  first_order = np.array([2, 1, 0, -1, -2])
  second_order = np.convolve(first_order, first_order, mode="full")

  # Three 9-tap kernels stacked row-wise: identity (pass-through), delta
  # (zero-padded to 9 taps), and delta-delta.
  kernels = np.zeros((3, 9), dtype=np.float32)
  kernels[0, 4] = 1.0
  kernels[1, 2:7] = first_order
  kernels[2, :] = second_order

  # Reshape to a conv2d filter of shape [9, 1, 1, 3] (time taps in, 3
  # output channels), then L2-normalise each kernel along the tap axis.
  filter_stack = kernels.T[:, None, None, :]
  filter_stack /= np.sqrt(
      np.sum(filter_stack**2, axis=0, keepdims=True))

  return tf.nn.conv2d(
      filterbanks, filter_stack, [1, 1, 1, 1], "SAME", data_format="NHWC",
      name=name)
def compute_mel_filterbank_features(
    waveforms,
    sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max, preemphasis=0.97,
    frame_length=25, frame_step=10, fft_length=None,
    window_fn=functools.partial(tf.signal.hann_window, periodic=True),
    lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80,
    log_noise_floor=1e-3, apply_mask=True):
  """Implement mel-filterbank extraction using tf ops.

  Args:
    waveforms: float32 tensor with shape [batch_size, max_len]
    sample_rate: sampling rate of the waveform
    dither: stddev of Gaussian noise added to waveform to prevent quantization
      artefacts
    preemphasis: waveform high-pass filtering constant
    frame_length: frame length in ms
    frame_step: frame_Step in ms
    fft_length: number of fft bins
    window_fn: windowing function
    lower_edge_hertz: lowest frequency of the filterbank
    upper_edge_hertz: highest frequency of the filterbank
    num_mel_bins: filterbank size
    log_noise_floor: clip small values to prevent numeric overflow in log
    apply_mask: When working on a batch of samples, set padding frames to zero
  Returns:
    filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]
  """
  # `stfts` is a complex64 Tensor representing the short-time Fourier
  # Transform of each signal in `signals`. Its shape is
  # [batch_size, ?, fft_unique_bins]
  # where fft_unique_bins = fft_length // 2 + 1

  # Find the wave length: the largest index for which the value is !=0
  # note that waveforms samples that are exactly 0.0 are quite common, so
  # simply doing sum(waveforms != 0, axis=-1) will not work correctly.
  wav_lens = tf.reduce_max(
      tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) *
      tf.to_int32(tf.not_equal(waveforms, 0.0)),
      axis=-1) + 1
  if dither > 0:
    # Additive Gaussian dither decorrelates quantization noise.
    waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither)
  if preemphasis > 0:
    # High-pass filter y[t] = x[t] - preemphasis * x[t-1]; the output is one
    # sample shorter, so the valid lengths shrink by one as well.
    waveforms = waveforms[:, 1:] - preemphasis * waveforms[:, :-1]
    wav_lens -= 1
  # Convert frame parameters from milliseconds to samples.
  frame_length = int(frame_length * sample_rate / 1e3)
  frame_step = int(frame_step * sample_rate / 1e3)
  if fft_length is None:
    # Default to the next power of two >= frame_length.
    fft_length = int(2**(np.ceil(np.log2(frame_length))))
  stfts = tf.signal.stft(
      waveforms,
      frame_length=frame_length,
      frame_step=frame_step,
      fft_length=fft_length,
      window_fn=window_fn,
      pad_end=True)
  # Number of valid STFT frames per example (ceil division by frame_step),
  # used to build a mask over the pad_end padding frames.
  stft_lens = (wav_lens + (frame_step - 1)) // frame_step
  masks = tf.to_float(tf.less_equal(
      tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0),
      tf.expand_dims(stft_lens, 1)))

  # An energy spectrogram is the magnitude of the complex-valued STFT.
  # A float32 Tensor of shape [batch_size, ?, 257].
  magnitude_spectrograms = tf.abs(stfts)

  # Warp the linear-scale, magnitude spectrograms into the mel-scale.
  num_spectrogram_bins = magnitude_spectrograms.shape[-1].value
  linear_to_mel_weight_matrix = (
      tf.signal.linear_to_mel_weight_matrix(
          num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
          upper_edge_hertz))
  mel_spectrograms = tf.tensordot(
      magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
  # Note: Shape inference for tensordot does not currently handle this case.
  mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
      linear_to_mel_weight_matrix.shape[-1:]))

  # Clip to the noise floor before log to avoid log(0).
  log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms))

  if apply_mask:
    # Zero out the frames that correspond to pad_end padding.
    log_mel_sgram *= tf.expand_dims(tf.to_float(masks), -1)

  return tf.expand_dims(log_mel_sgram, -1, name="mel_sgrams")
| {
"alphanum_fraction": 0.724078671,
"author": null,
"avg_line_length": 37.6762589928,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c4efd96c8e7fbfee089bc4545a195ddd97d88979",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-05-03T17:34:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-03T17:34:21.000Z",
"max_forks_repo_head_hexsha": "7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "SamuelmsWong/tensor2tensor",
"max_forks_repo_path": "tensor2tensor/layers/common_audio.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "SamuelmsWong/tensor2tensor",
"max_issues_repo_path": "tensor2tensor/layers/common_audio.py",
"max_line_length": 80,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "SamuelmsWong/tensor2tensor",
"max_stars_repo_path": "tensor2tensor/layers/common_audio.py",
"max_stars_repo_stars_event_max_datetime": "2021-01-19T21:36:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-19T20:21:15.000Z",
"num_tokens": 1410,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5237
} |
r=0.62
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7xw26/media/images/d7xw26-059/svc:tesseract/full/full/0.62/default.jpg Accept:application/hocr+xml
| {
"alphanum_fraction": 0.8256410256,
"author": null,
"avg_line_length": 65,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "e1b60f23815764221627d0ba6d3e5c655aadc69c",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ucd-library/wine-price-extraction",
"max_forks_repo_path": "tesseract/rotate/d7xw26-059.r",
"max_issues_count": 21,
"max_issues_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f",
"max_issues_repo_issues_event_max_datetime": "2019-07-02T19:19:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-09-18T17:41:32.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ucd-library/wine-price-extraction",
"max_issues_repo_path": "tesseract/rotate/d7xw26-059.r",
"max_line_length": 187,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "c346e48b5cda8377335b66e4a1f57c013aa06f1f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ucd-library/wine-price-extraction",
"max_stars_repo_path": "ark_87287/d7xw26/d7xw26-059/rotated.r",
"max_stars_repo_stars_event_max_datetime": "2021-04-21T18:04:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-11-16T19:55:13.000Z",
"num_tokens": 62,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 195
} |
/*
Copyright (c) 2005-2020, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ABSTRACTCARDIACTISSUE_HPP_
#define ABSTRACTCARDIACTISSUE_HPP_
#include <set>
#include <vector>
#include <boost/shared_ptr.hpp>
#include "UblasMatrixInclude.hpp"
#include "ChasteSerialization.hpp"
#include "ClassIsAbstract.hpp"
#include "ChasteSerializationVersion.hpp"
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/split_member.hpp>
#include "AbstractCardiacCellInterface.hpp"
#include "FakeBathCell.hpp"
#include "AbstractCardiacCellFactory.hpp"
#include "AbstractConductivityTensors.hpp"
#include "AbstractPurkinjeCellFactory.hpp"
#include "ReplicatableVector.hpp"
#include "HeartConfig.hpp"
#include "ArchiveLocationInfo.hpp"
#include "AbstractDynamicallyLoadableEntity.hpp"
#include "DynamicModelLoaderRegistry.hpp"
#include "AbstractConductivityModifier.hpp"
/**
* Class containing "tissue-like" functionality used in monodomain and bidomain
* problems.
*
* Contains the cardiac cells (ODE systems for each node of the mesh) and
* conductivity tensors (dependent on fibre directions).
*
* Also contains knowledge of parallelisation in the form of the
 * distributed vector factory. This class deals with creating a distributed
* vector of cells, and getting the ionic current and stimuli from these
* cells and putting them in replicated arrays for the PDE solvers to call.
*/
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM = ELEMENT_DIM>
class AbstractCardiacTissue : private boost::noncopyable
{
private:
/** Needed for serialization. */
friend class boost::serialization::access;
friend class TestMonodomainTissue;
/**
* Archive the member variables.
*
* @param archive
* @param version
*/
    template<class Archive>
    void save(Archive & archive, const unsigned int version) const
    {
        // mHasPurkinje and mExchangeHalos were introduced in archive
        // versions 3 and 2 respectively, hence the version guards.
        if (version >= 3)
        {
            archive & mHasPurkinje;
        }
        if (version >= 2)
        {
            archive & mExchangeHalos;
        }
        // Don't use the std::vector serialization for cardiac cells, so that we can load them
        // more cleverly when migrating checkpoints.
        SaveCardiacCells(*ProcessSpecificArchive<Archive>::Get(), version);
        // archive & mpMesh; Archived in save/load_constructs at the bottom of Mono/BidomainTissue.hpp
        // archive & mpIntracellularConductivityTensors; Loaded from HeartConfig every time constructor is called
        if (HeartConfig::Instance()->IsMeshProvided() && HeartConfig::Instance()->GetLoadMesh())
        {
            // Copy the fibre-orientation file (if any) next to the archived
            // mesh so the checkpoint directory is self-contained.
            switch (HeartConfig::Instance()->GetConductivityMedia())
            {
                case cp::media_type::Orthotropic:
                {
                    FileFinder source_file(mFibreFilePathNoExtension + ".ortho", RelativeTo::AbsoluteOrCwd);
                    assert(source_file.Exists());
                    FileFinder dest_file(ArchiveLocationInfo::GetArchiveRelativePath() + ArchiveLocationInfo::GetMeshFilename() + ".ortho", RelativeTo::ChasteTestOutput);
                    // Only the master process performs the file copy.
                    TRY_IF_MASTER(source_file.CopyTo(dest_file));
                    break;
                }
                case cp::media_type::Axisymmetric:
                {
                    FileFinder source_file(mFibreFilePathNoExtension + ".axi", RelativeTo::AbsoluteOrCwd);
                    assert(source_file.Exists());
                    FileFinder dest_file(ArchiveLocationInfo::GetArchiveRelativePath()
                                         + ArchiveLocationInfo::GetMeshFilename() + ".axi", RelativeTo::ChasteTestOutput);
                    TRY_IF_MASTER(source_file.CopyTo(dest_file));
                    break;
                }
                case cp::media_type::NoFibreOrientation:
                    break;
                default :
                    NEVER_REACHED;
            }
        }
        // archive & mIionicCacheReplicated; // will be regenerated
        // archive & mIntracellularStimulusCacheReplicated; // will be regenerated
        archive & mDoCacheReplication;
        // archive & mMeshUnarchived; Not archived since set to true when archiving constructor is called.
        (*ProcessSpecificArchive<Archive>::Get()) & mpDistributedVectorFactory;
        // Paranoia: check we agree with the mesh on who owns what
        assert(mpDistributedVectorFactory == mpMesh->GetDistributedVectorFactory());
        assert(mpDistributedVectorFactory->GetLow()==mpMesh->GetDistributedVectorFactory()->GetLow());
        assert(mpDistributedVectorFactory->GetLocalOwnership()==mpMesh->GetDistributedVectorFactory()->GetLocalOwnership());
    }
/**
* Unarchive the member variables.
*
* @param archive
* @param version
*/
    template<class Archive>
    void load(Archive & archive, const unsigned int version)
    {
        // archive & mpMesh; Archived in save/load_constructs at the bottom of Mono/BidomainTissue.hpp
        // archive & mpIntracellularConductivityTensors; Loaded from HeartConfig every time constructor is called
        if (version >= 3)
        {
            archive & mHasPurkinje;
            if (mHasPurkinje)
            {
                // Pre-size the Purkinje ionic-current cache to the problem size.
                mPurkinjeIionicCacheReplicated.Resize(mpDistributedVectorFactory->GetProblemSize());
            }
        }
        if (version >= 2)
        {
            archive & mExchangeHalos;
            if (mExchangeHalos)
            {
                // Rebuild the halo-exchange data structures from the mesh,
                // since they are not stored in the archive.
                mpMesh->CalculateNodeExchange(mNodesToSendPerProcess, mNodesToReceivePerProcess);
                CalculateHaloNodesFromNodeExchange();
                unsigned num_halo_nodes = mHaloNodes.size();
                mHaloCellsDistributed.resize( num_halo_nodes );
                for (unsigned local_index = 0; local_index < num_halo_nodes; local_index++)
                {
                    unsigned global_index = mHaloNodes[local_index];
                    mHaloGlobalToLocalIndexMap[global_index] = local_index;
                }
            }
        }
        // mCellsDistributed & mHaloCellsDistributed:
        LoadCardiacCells(*ProcessSpecificArchive<Archive>::Get(), version);
        // archive & mIionicCacheReplicated; // will be regenerated
        // archive & mIntracellularStimulusCacheReplicated; // will be regenerated
        archive & mDoCacheReplication;
        // we no longer have a bool mDoOneCacheReplication, but to maintain backwards compatibility
        // we archive something if version==0
        if (version==0)
        {
            bool do_one_cache_replication = true;
            archive & do_one_cache_replication;
        }
        (*ProcessSpecificArchive<Archive>::Get()) & mpDistributedVectorFactory;
        // Paranoia: check we agree with the mesh on who owns what
        assert(mpDistributedVectorFactory == mpMesh->GetDistributedVectorFactory());
        assert(mpDistributedVectorFactory->GetLow()==mpMesh->GetDistributedVectorFactory()->GetLow());
        assert(mpDistributedVectorFactory->GetLocalOwnership()==mpMesh->GetDistributedVectorFactory()->GetLocalOwnership());
        // archive & mMeshUnarchived; Not archived since set to true when archiving constructor is called.
        // not archiving mpConductivityModifier for the time being (mechanics simulations are only use-case at the moment, and they
        // do not get archived...). mpConductivityModifier has to be reset to NULL upon load.
        mpConductivityModifier = NULL;
    }
BOOST_SERIALIZATION_SPLIT_MEMBER()
/**
* Convenience method for intracellular conductivity tensor creation
*/
void CreateIntracellularConductivityTensor();
protected:
/** It's handy to keep a pointer to the mesh object*/
AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* mpMesh;
/** Intracellular conductivity tensors. Not archived, since it's loaded from the
* HeartConfig singleton. */
AbstractConductivityTensors<ELEMENT_DIM,SPACE_DIM>* mpIntracellularConductivityTensors;
/** The vector of cells. Distributed. */
std::vector< AbstractCardiacCellInterface* > mCellsDistributed;
/** The vector of the purkinje cells. Distributed.
* Empty unless a AbstractPurkinjeCellFactory is given to the constructor. */
std::vector< AbstractCardiacCellInterface* > mPurkinjeCellsDistributed;
/**
* Cache containing all the ionic currents for each node,
* replicated over all processes.
*/
ReplicatableVector mIionicCacheReplicated;
/**
* Cache containing all the ionic currents for each purkinje node,
* replicated over all processes.
*/
ReplicatableVector mPurkinjeIionicCacheReplicated;
/**
* Cache containing all the stimulus currents for each node,
* replicated over all processes.
*/
ReplicatableVector mIntracellularStimulusCacheReplicated;
/**
* Cache containing all the stimulus currents for each Purkinje node,
* replicated over all processes.
*/
ReplicatableVector mPurkinjeIntracellularStimulusCacheReplicated;
/** Local pointer to the HeartConfig singleton instance, for convenience. */
HeartConfig* mpConfig;
/**
* Local pointer to the distributed vector factory associated with the mesh object used.
*
* Used to retrieve node ownership range when needed.
*
* NB: This is set from mpMesh->GetDistributedVectorFactory() and thus always equal to
* that. We never assume ownership of the object.
*/
DistributedVectorFactory* mpDistributedVectorFactory;
/**
* Path to the location of the fibre file without extension.
*/
std::string mFibreFilePathNoExtension;
/**
* This class, if not NULL, will be used to modify the conductivity that is obtained from
* mpIntracellularConductivityTensors when rGetIntracellularConductivityTensor() is called.
* For example, it is required when conductivities become deformation dependent.
*/
AbstractConductivityModifier<ELEMENT_DIM,SPACE_DIM>* mpConductivityModifier;
/** Whether this tissue has any Purkinje cells. */
bool mHasPurkinje;
/**
* Whether we need to replicate the caches.
*
* When doing matrix-based RHS assembly, we only actually need information from
* cells/nodes local to the processor, so replicating the caches is an
* unnecessary communication overhead.
*
* Defaults to true.
*/
bool mDoCacheReplication;
/**
* Whether the mesh was unarchived or got from elsewhere.
*/
bool mMeshUnarchived;
/**
* Whether to exchange cell models across the halo boundaries.
* Used in state variable interpolation.
*/
bool mExchangeHalos;
/** Vector of halo node indices for current process */
std::vector<unsigned> mHaloNodes;
/** The vector of halo cells. Distributed. */
std::vector< AbstractCardiacCellInterface* > mHaloCellsDistributed;
/** Map of global to local indices for halo nodes. */
std::map<unsigned, unsigned> mHaloGlobalToLocalIndexMap;
/**
* A vector which will be of size GetNumProcs() where each internal vector except
* i=GetMyRank() contains an ordered list of indices of nodes to send to process i
* during data exchange
*/
std::vector<std::vector<unsigned> > mNodesToSendPerProcess;
/**
* A vector which will be of size GetNumProcs() for information to receive for
* process i.
*/
std::vector<std::vector<unsigned> > mNodesToReceivePerProcess;
/**
* If the mesh is a tetrahedral mesh then all elements and nodes are known.
* The halo nodes to the ones which are actually used as cardiac cells
* must be calculated explicitly.
*/
void CalculateHaloNodesFromNodeExchange();
/**
* If #mExchangeHalos is true, this method calls CalculateHaloNodesFromNodeExchange
* and sets up the halo cell data structures #mHaloCellsDistributed and #mHaloGlobalToLocalIndexMap.
*
* @param pCellFactory cell factory to use to create halo cells
*/
void SetUpHaloCells(AbstractCardiacCellFactory<ELEMENT_DIM,SPACE_DIM>* pCellFactory);
public:
/**
* This constructor is called from the Initialise() method of the CardiacProblem class.
* It creates all the cell objects, and sets up the conductivities.
*
* Note that pCellFactory contains a pointer to the mesh
*
* @param pCellFactory factory to use to create cardiac cells.
* If this is actually an AbstractPurkinjeCellFactory it creates purkinje cells.
* @param exchangeHalos used in state-variable interpolation. Defaults to false.
*/
AbstractCardiacTissue(AbstractCardiacCellFactory<ELEMENT_DIM,SPACE_DIM>* pCellFactory, bool exchangeHalos=false);
/**
* This constructor is called by the archiver only.
*
* @param pMesh a pointer to the AbstractTetrahedral mesh.
*/
AbstractCardiacTissue(AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* pMesh);
/** Virtual destructor */
virtual ~AbstractCardiacTissue();
/** @return whether this tissue contains Purkinje fibres. */
bool HasPurkinje();
/**
* Set whether or not to replicate the caches across all processors.
*
* See also mDoCacheReplication.
* @param doCacheReplication - true if the cache needs to be replicated
*/
void SetCacheReplication(bool doCacheReplication);
/**
* Get whether or not to replicate the caches across all processors.
*
* @return mDoCacheReplication - true if the cache needs to be replicated
*/
bool GetDoCacheReplication();
/** @return the intracellular conductivity tensor for the given element
* @param elementIndex index of the element of interest
*/
const c_matrix<double, SPACE_DIM, SPACE_DIM>& rGetIntracellularConductivityTensor(unsigned elementIndex);
/**
* @return the extracellular conductivity tensor for the given element
* (this throws an exception in this abstract class since monodomain don't have extracellular ones)
* it is overridden in the BidomainTissue.
*
* @param elementIndex index of the element of interest
*/
virtual const c_matrix<double, SPACE_DIM, SPACE_DIM>& rGetExtracellularConductivityTensor(unsigned elementIndex);
/**
* @return a pointer to a cell, indexed by the global node index.
*
 * \note Should only be called by the process owning the cell -
* triggers an assertion otherwise.
*
* @param globalIndex global node index for which to retrieve a cell
*/
AbstractCardiacCellInterface* GetCardiacCell( unsigned globalIndex );
/**
* @return a pointer to a Purkinje cell, indexed by the global node index.
*
 * \note Should only be called by the process owning the cell -
* triggers an assertion otherwise.
*
* @param globalIndex global node index for which to retrieve a cell
*/
AbstractCardiacCellInterface* GetPurkinjeCell( unsigned globalIndex );
/**
* @return a pointer to a halo cell, indexed by the global node index.
*
 * \note Should only be called by the process owning the halo cell -
* triggers an assertion otherwise.
*
* @param globalIndex global node index for which to retrieve a cell
*/
AbstractCardiacCellInterface* GetCardiacCellOrHaloCell( unsigned globalIndex );
/**
* Integrate the cell ODEs and update ionic current etc for each of the
* cells, between the two times provided.
*
* @param existingSolution the current voltage solution vector
* @param time the current simulation time
* @param nextTime when to simulate the cells until
* @param updateVoltage whether to also solve for the voltage (generally false, true for operator splitting methods). Defaults to false
*/
virtual void SolveCellSystems(Vec existingSolution, double time, double nextTime, bool updateVoltage=false);
/** @return the entire ionic current cache */
ReplicatableVector& rGetIionicCacheReplicated();
/** @return the entire stimulus current cache */
ReplicatableVector& rGetIntracellularStimulusCacheReplicated();
/** @return the entire Purkinje ionic current cache */
ReplicatableVector& rGetPurkinjeIionicCacheReplicated();
/** @return the entire Purkinje stimulus current cache */
ReplicatableVector& rGetPurkinjeIntracellularStimulusCacheReplicated();
/**
* Update the Iionic and intracellular stimulus caches.
*
* @param globalIndex global index of the entry to update
* @param localIndex local index of the entry to update
* @param nextTime the next PDE time point, at which to evaluate the stimulus current
*/
void UpdateCaches(unsigned globalIndex, unsigned localIndex, double nextTime);
/**
* Update the Iionic and intracellular stimulus caches for Purkinje cells.
*
* @param globalIndex global index of the entry to update
* @param localIndex local index of the entry to update
* @param nextTime the next PDE time point, at which to evaluate the stimulus current
*/
void UpdatePurkinjeCaches(unsigned globalIndex, unsigned localIndex, double nextTime);
/**
* Replicate the Iionic and intracellular stimulus caches.
*/
void ReplicateCaches();
/**
* @return a reference to the vector of distributed cells. Needed for archiving.
*/
const std::vector<AbstractCardiacCellInterface*>& rGetCellsDistributed() const;
/**
* @return a reference to the vector of distributed Purkinje cells. Needed for archiving.
*/
const std::vector<AbstractCardiacCellInterface*>& rGetPurkinjeCellsDistributed() const;
/**
 * @return a pointer to the mesh object
*/
const AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* pGetMesh() const;
/**
* Set a modifier class which will be used to modifier a conductivity obtained from mpIntracellularConductivityTensors
* when rGetIntracellularConductivityTensor() is called. For example, it is required when conductivities become deformation-dependent.
* @param pModifier Pointer to the concrete modifier class
*/
void SetConductivityModifier(AbstractConductivityModifier<ELEMENT_DIM,SPACE_DIM>* pModifier);
/**
* Save our tissue to an archive.
*
* Writes:
* -# #mpDistributedVectorFactory
* -# number of cells on this process
* -# each cell pointer in turn, interleaved with Purkinje cells if present
*
* @param archive the process-specific archive to write cells to.
* @param version
*/
template<class Archive>
void SaveCardiacCells(Archive & archive, const unsigned int version) const
{
const std::vector<AbstractCardiacCellInterface*> & r_cells_distributed = rGetCellsDistributed();
assert(mpDistributedVectorFactory == this->mpMesh->GetDistributedVectorFactory());
archive & mpDistributedVectorFactory; // Needed when loading
const unsigned num_cells = r_cells_distributed.size();
archive & num_cells;
for (unsigned i=0; i<num_cells; i++)
{
AbstractDynamicallyLoadableEntity* p_entity = dynamic_cast<AbstractDynamicallyLoadableEntity*>(r_cells_distributed[i]);
bool is_dynamic = (p_entity != NULL);
archive & is_dynamic;
if (is_dynamic)
{
#ifdef CHASTE_CAN_CHECKPOINT_DLLS
archive & p_entity->GetLoader()->GetLoadableModulePath();
#else
// We should have thrown an exception before this point
NEVER_REACHED;
#endif // CHASTE_CAN_CHECKPOINT_DLLS
}
archive & r_cells_distributed[i];
if (mHasPurkinje)
{
archive & rGetPurkinjeCellsDistributed()[i];
}
}
}
/**
* Load our tissue from an archive.
*
* Handles the checkpoint migration case, deleting loaded cells immediately if they are
* not local to this process.
*
* Also loads halo cells if we're doing halo exchange, by using the non-local cells from the
* archive.
*
* @param archive the process-specific archive to load from
* @param version archive version
*/
template<class Archive>
void LoadCardiacCells(Archive & archive, const unsigned int version)
{
// Note that p_factory loaded from this archive might not be the same as our mesh's factory,
// since we're loading a process-specific archive that could have been written by any process.
// We therefore need to use p_mesh_factory to determine the partitioning in use for the resumed
// simulation, and p_factory to determine what the original partitioning was when the simulation
// was saved.
DistributedVectorFactory* p_factory;
DistributedVectorFactory* p_mesh_factory = this->mpMesh->GetDistributedVectorFactory();
archive & p_factory;
unsigned num_cells;
archive & num_cells;
if (mCellsDistributed.empty())
{
mCellsDistributed.resize(p_mesh_factory->GetLocalOwnership());
#ifndef NDEBUG
// Paranoia
for (unsigned i=0; i<mCellsDistributed.size(); i++)
{
assert(mCellsDistributed[i] == NULL);
}
#endif
}
else
{
assert(mCellsDistributed.size() == p_mesh_factory->GetLocalOwnership());
}
if (mHasPurkinje)
{
if (mPurkinjeCellsDistributed.empty())
{
mPurkinjeCellsDistributed.resize(p_mesh_factory->GetLocalOwnership());
}
else
{
assert(mPurkinjeCellsDistributed.size() == p_mesh_factory->GetLocalOwnership());
}
}
// We don't store a cell index in the archive, so need to work out what global index this tissue starts at.
// If we have an original factory we use the original low index; otherwise we use the current low index.
unsigned index_low = p_factory->GetOriginalFactory() ? p_factory->GetOriginalFactory()->GetLow() : p_mesh_factory->GetLow();
// Track fake cells (which might have multiple pointers to the same object) to make sure we only delete non-local ones
std::set<FakeBathCell*> fake_cells_non_local, fake_cells_local;
/*
* Historical note:
*
* We always do a dumb partition when we unarchive.
*
* When unarchive was first implemented in parallel (migration #1199) it was thought that we might want to repartition the mesh. This would be feasible and would give
* better partitions when we move to different numbers of processes. However it would require us to apply a new permutation to entire data structure.
*
* In the case where the original mesh was permuted and *copied* into the archive, we need to apply the stored permutation to the mesh but not to the archive (cells). That
* is, any permutation stored with the mesh can be safely ignored. (If we also had to repartition/permute the archive, then we would be applying a double permutation to the
* mesh and a single permutation to archive.)
*
*/
for (unsigned local_index=0; local_index<num_cells; local_index++)
{
// Figure out where this cell goes
unsigned global_index = index_low + local_index;
bool local = p_mesh_factory->IsGlobalIndexLocal(global_index);
// Check if this will be a halo cell
std::map<unsigned, unsigned>::const_iterator halo_position;
bool halo = ((halo_position=mHaloGlobalToLocalIndexMap.find(global_index)) != mHaloGlobalToLocalIndexMap.end());
// halo_position->second is local halo index
bool is_dynamic;
archive & is_dynamic;
if (is_dynamic)
{
#ifdef CHASTE_CAN_CHECKPOINT_DLLS
// Ensure the shared object file for this cell model is loaded.
// We need to do this here, rather than in the class' serialization code,
// because that code won't be available until this is done...
std::string shared_object_path;
archive & shared_object_path;
DynamicModelLoaderRegistry::Instance()->GetLoader(shared_object_path);
#else
// Could only happen on Mac OS X, and will probably be trapped earlier.
NEVER_REACHED;
#endif // CHASTE_CAN_CHECKPOINT_DLLS
}
AbstractCardiacCellInterface* p_cell;
archive & p_cell;
AbstractCardiacCellInterface* p_purkinje_cell = NULL;
if (mHasPurkinje)
{
archive & p_purkinje_cell;
}
// Check if it's a fake cell
FakeBathCell* p_fake = dynamic_cast<FakeBathCell*>(p_cell);
if (p_fake)
{
if (halo || local)
{
fake_cells_local.insert(p_fake);
}
else
{
fake_cells_non_local.insert(p_fake);
}
}
FakeBathCell* p_fake_purkinje = dynamic_cast<FakeBathCell*>(p_purkinje_cell);
if (p_fake_purkinje)
{
if (halo || local)
{
fake_cells_local.insert(p_fake_purkinje);
}
else
{
fake_cells_non_local.insert(p_fake_purkinje);
}
}
// Add real cells to the local or halo vectors
if (local)
{
// Note that the original local_index was relative to the archived partition (distributed vector)
// The new_local_index is local relative to the new partition in memory
unsigned new_local_index = global_index - p_mesh_factory->GetLow();
assert(mCellsDistributed[new_local_index] == NULL);
mCellsDistributed[new_local_index] = p_cell;
if (mHasPurkinje)
{
assert(mPurkinjeCellsDistributed[new_local_index] == NULL);
mPurkinjeCellsDistributed[new_local_index] = p_purkinje_cell;
}
}
else if (halo)
{
assert(mHaloCellsDistributed[halo_position->second] == NULL);
mHaloCellsDistributed[halo_position->second] = p_cell;
}
else
{
if (!p_fake)
{
// Non-local real cell, so free the memory.
delete p_cell;
}
if (!p_fake_purkinje)
{
// This will be NULL if there's no Purkinje, so a delete is OK.
delete p_purkinje_cell;
}
}
}
// Delete any unused fake cells
for (std::set<FakeBathCell*>::iterator it = fake_cells_non_local.begin();
it != fake_cells_non_local.end();
++it)
{
if (fake_cells_local.find(*it) == fake_cells_local.end())
{
delete (*it);
}
}
}
};
TEMPLATED_CLASS_IS_ABSTRACT_2_UNSIGNED(AbstractCardiacTissue)
namespace boost {
namespace serialization {
/**
* Specify a version number for archive backwards compatibility.
*
* This is how to do BOOST_CLASS_VERSION(AbstractCardiacTissue, 1)
* with a templated class.
*/
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
struct version<AbstractCardiacTissue<ELEMENT_DIM, SPACE_DIM> >
{
///Macro to set the version number of templated archive in known versions of Boost
CHASTE_VERSION_CONTENT(3);
};
} // namespace serialization
} // namespace boost
#endif /*ABSTRACTCARDIACTISSUE_HPP_*/
| {
"alphanum_fraction": 0.6650842471,
"author": null,
"avg_line_length": 39.9359145527,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "5914730ebdc2bba37fd71cca07fac1f8a163f499",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-09-10T16:12:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-09-10T16:12:21.000Z",
"max_forks_repo_head_hexsha": "17d343c09a246a50f9e3a3cbfc399ca6bef353ce",
"max_forks_repo_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"max_forks_repo_name": "SoftMatterMechanics/ApicalStressFibers",
"max_forks_repo_path": "heart/src/tissue/AbstractCardiacTissue.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "17d343c09a246a50f9e3a3cbfc399ca6bef353ce",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"max_issues_repo_name": "SoftMatterMechanics/ApicalStressFibers",
"max_issues_repo_path": "heart/src/tissue/AbstractCardiacTissue.hpp",
"max_line_length": 181,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "17d343c09a246a50f9e3a3cbfc399ca6bef353ce",
"max_stars_repo_licenses": [
"Apache-2.0",
"BSD-3-Clause"
],
"max_stars_repo_name": "SoftMatterMechanics/ApicalStressFibers",
"max_stars_repo_path": "heart/src/tissue/AbstractCardiacTissue.hpp",
"max_stars_repo_stars_event_max_datetime": "2020-09-10T16:12:13.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-10T16:12:13.000Z",
"num_tokens": 6485,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 29912
} |
#!/usr/bin/env python
u"""
gen_point_load.py
Written by Tyler Sutterley (07/2020)
Calculates gravitational spherical harmonic coefficients for point masses
CALLING SEQUENCE:
Ylms = gen_point_load(data, lon, lat, LMAX=LMAX)
INPUTS:
data: data magnitude
lon: longitude of points
lat: latitude of points
OUTPUTS:
clm: cosine spherical harmonic coefficients (geodesy normalization)
slm: sine spherical harmonic coefficients (geodesy normalization)
l: spherical harmonic degree to LMAX
m: spherical harmonic order to MMAX
OPTIONS:
LMAX: Upper bound of Spherical Harmonic Degrees
MMAX: Upper bound of Spherical Harmonic Orders
UNITS: input data units
1: grams of mass (default)
2: gigatonnes of mass
LOVE: input load Love numbers up to degree LMAX (hl,kl,ll)
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
scipy: Scientific Tools for Python (https://docs.scipy.org/doc/)
PROGRAM DEPENDENCIES:
legendre.py: Computes associated Legendre polynomials for degree l
units.py: class for converting spherical harmonic data to specific units
UPDATE HISTORY:
Updated 07/2020: added function docstrings
Written 05/2020
"""
import numpy as np
from gravity_toolkit.legendre import legendre
from gravity_toolkit.units import units
def gen_point_load(data, lon, lat, LMAX=60, MMAX=None, UNITS=1, LOVE=None):
"""
Calculates spherical harmonic coefficients for point masses
Arguments
---------
data: data magnitude
lon: longitude of points
lat: latitude of points
Keyword arguments
-----------------
LMAX: Upper bound of Spherical Harmonic Degrees
MMAX: Upper bound of Spherical Harmonic Orders
UNITS: input data units
1: grams of mass (default)
2: gigatonnes of mass
LOVE: input load Love numbers up to degree LMAX (hl,kl,ll)
Returns
-------
clm: cosine spherical harmonic coefficients
slm: sine spherical harmonic coefficients
l: spherical harmonic degree to LMAX
m: spherical harmonic order to MMAX
"""
#-- upper bound of spherical harmonic orders (default == LMAX)
if MMAX is None:
MMAX = np.copy(LMAX)
#-- number of input data points
npts = len(data.flatten())
#-- convert output longitude and latitude into radians
phi = np.pi*lon.flatten()/180.0
theta = np.pi*(90.0 - lat.flatten())/180.0
#-- SH Degree dependent factors to convert into geodesy normalized SH's
#-- use splat operator to extract arrays of kl, hl, and ll Love Numbers
factors = units(lmax=LMAX).spatial(*LOVE)
#-- extract degree dependent factor for specific units
int_fact = np.zeros((npts))
if (UNITS == 1):
#-- Default Parameter: Input in g
dfactor = factors.cmwe/(factors.rad_e**2)
int_fact[:] = 1.0
elif (UNITS == 2):
#-- Input in gigatonnes (Gt)
dfactor = factors.cmwe/(factors.rad_e**2)
int_fact[:] = 1e15
#-- flattened form of data converted to units
D = int_fact*data.flatten()
#-- output harmonics
Ylms = {}
Ylms['clm'] = np.zeros((LMAX+1,MMAX+1))
Ylms['slm'] = np.zeros((LMAX+1,MMAX+1))
Ylms['l'] = np.arange(LMAX+1)
Ylms['m'] = np.arange(MMAX+1)
#-- for each degree l
for l in range(LMAX+1):
m1 = np.min([l,MMAX]) + 1
SPH = spherical_harmonic_matrix(l,D,phi,theta,dfactor[l])
#-- truncate to spherical harmonic order and save to output
Ylms['clm'][l,:m1] = SPH.real[:m1]
Ylms['slm'][l,:m1] = SPH.imag[:m1]
#-- output harmonics
return Ylms
#-- calculate spherical harmonics of degree l evaluated at (theta,phi)
def spherical_harmonic_matrix(l,data,phi,theta,coeff):
"""
Calculates spherical harmonics of degree l evaluated at coordinates
Arguments
---------
l: spherical harmonic degree
data: data magnitude in grams
phi: longitude of points in radians
theta: colatitude of points in radians
coeff: degree-dependent factor for converting units
Returns
-------
Ylms: spherical harmonic coefficients in Eulerian form
"""
#-- calculate normalized legendre polynomials (points, order)
Pl = legendre(l, np.cos(theta), NORMALIZE=True).T
#-- spherical harmonic orders up to degree l
m = np.arange(0,l+1)
#-- calculate Euler's of spherical harmonic order multiplied by azimuth phi
mphi = np.exp(1j*np.dot(np.squeeze(phi)[:,np.newaxis],m[np.newaxis,:]))
#-- reshape data to order
D = np.kron(np.ones((1,l+1)), data[:,np.newaxis])
#-- calculate spherical harmonics and multiply by coefficients and data
Ylms = coeff*D*Pl*mphi
#-- calculate the sum over all points and return harmonics for degree l
return np.sum(Ylms,axis=0)
| {
"alphanum_fraction": 0.6731727575,
"author": null,
"avg_line_length": 33.6783216783,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "524718bb86447672551f532ce62f10baa1bfe70d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "aa48506a64860809249164a9bcaebf679d41f6ff",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "yaramohajerani/read-GRACE-harmonics",
"max_forks_repo_path": "gravity_toolkit/gen_point_load.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "aa48506a64860809249164a9bcaebf679d41f6ff",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "yaramohajerani/read-GRACE-harmonics",
"max_issues_repo_path": "gravity_toolkit/gen_point_load.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "aa48506a64860809249164a9bcaebf679d41f6ff",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "yaramohajerani/read-GRACE-harmonics",
"max_stars_repo_path": "gravity_toolkit/gen_point_load.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1282,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4816
} |
[STATEMENT]
lemma resChainI:
fixes P :: pi
and Q :: pi
and Rel :: "(pi \<times> pi) set"
and lst :: "name list"
assumes eqvtRel: "eqvt Rel"
and Res: "\<And>R S x. (R, S) \<in> Rel \<Longrightarrow> (<\<nu>x>R, <\<nu>x>S) \<in> Rel"
and PRelQ: "P \<leadsto>[Rel] Q"
shows "(resChain lst) P \<leadsto>[Rel] (resChain lst) Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. resChain lst P \<leadsto>[Rel] resChain lst Q
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. resChain lst P \<leadsto>[Rel] resChain lst Q
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. resChain lst P \<leadsto>[Rel] resChain lst Q
[PROOF STEP]
proof(induct lst)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. resChain [] P \<leadsto>[Rel] resChain [] Q
2. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
(* Base case *)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. resChain [] P \<leadsto>[Rel] resChain [] Q
2. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
from PRelQ
[PROOF STATE]
proof (chain)
picking this:
P \<leadsto>[Rel] Q
[PROOF STEP]
show "resChain [] P \<leadsto>[Rel] resChain [] Q"
[PROOF STATE]
proof (prove)
using this:
P \<leadsto>[Rel] Q
goal (1 subgoal):
1. resChain [] P \<leadsto>[Rel] resChain [] Q
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
resChain [] P \<leadsto>[Rel] resChain [] Q
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
(* Inductive step *)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
fix a lst
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
assume IH: "(resChain lst P) \<leadsto>[Rel] (resChain lst Q)"
[PROOF STATE]
proof (state)
this:
resChain lst P \<leadsto>[Rel] resChain lst Q
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
resChain lst P \<leadsto>[Rel] resChain lst Q
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
from Res
[PROOF STATE]
proof (chain)
picking this:
(?R, ?S) \<in> Rel \<Longrightarrow> (<\<nu>?x>?R, <\<nu>?x>?S) \<in> Rel
[PROOF STEP]
have "\<And>P Q a. (P, Q) \<in> Rel \<Longrightarrow> (<\<nu>a>P, <\<nu>a>Q) \<in> Rel"
[PROOF STATE]
proof (prove)
using this:
(?R, ?S) \<in> Rel \<Longrightarrow> (<\<nu>?x>?R, <\<nu>?x>?S) \<in> Rel
goal (1 subgoal):
1. \<And>P Q a. (P, Q) \<in> Rel \<Longrightarrow> (<\<nu>a>P, <\<nu>a>Q) \<in> Rel
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(?P, ?Q) \<in> Rel \<Longrightarrow> (<\<nu>?a>?P, <\<nu>?a>?Q) \<in> Rel
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(?P, ?Q) \<in> Rel \<Longrightarrow> (<\<nu>?a>?P, <\<nu>?a>?Q) \<in> Rel
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
have "Rel \<subseteq> Rel"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Rel \<subseteq> Rel
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Rel \<subseteq> Rel
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
resChain lst P \<leadsto>[Rel] resChain lst Q
(?P, ?Q) \<in> Rel \<Longrightarrow> (<\<nu>?a>?P, <\<nu>?a>?Q) \<in> Rel
Rel \<subseteq> Rel
[PROOF STEP]
have "<\<nu>a>(resChain lst P) \<leadsto>[Rel] <\<nu>a>(resChain lst Q)"
[PROOF STATE]
proof (prove)
using this:
resChain lst P \<leadsto>[Rel] resChain lst Q
(?P, ?Q) \<in> Rel \<Longrightarrow> (<\<nu>?a>?P, <\<nu>?a>?Q) \<in> Rel
Rel \<subseteq> Rel
goal (1 subgoal):
1. <\<nu>a>resChain lst P \<leadsto>[Rel] <\<nu>a>resChain lst Q
[PROOF STEP]
using eqvtRel
[PROOF STATE]
proof (prove)
using this:
resChain lst P \<leadsto>[Rel] resChain lst Q
(?P, ?Q) \<in> Rel \<Longrightarrow> (<\<nu>?a>?P, <\<nu>?a>?Q) \<in> Rel
Rel \<subseteq> Rel
eqvt Rel
goal (1 subgoal):
1. <\<nu>a>resChain lst P \<leadsto>[Rel] <\<nu>a>resChain lst Q
[PROOF STEP]
by(rule_tac resPres)
[PROOF STATE]
proof (state)
this:
<\<nu>a>resChain lst P \<leadsto>[Rel] <\<nu>a>resChain lst Q
goal (1 subgoal):
1. \<And>a lst. resChain lst P \<leadsto>[Rel] resChain lst Q \<Longrightarrow> resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
thus "resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q"
[PROOF STATE]
proof (prove)
using this:
<\<nu>a>resChain lst P \<leadsto>[Rel] <\<nu>a>resChain lst Q
goal (1 subgoal):
1. resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
resChain (a # lst) P \<leadsto>[Rel] resChain (a # lst) Q
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
resChain lst P \<leadsto>[Rel] resChain lst Q
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Pi_Calculus_Strong_Early_Sim_Pres",
"hexsha": null,
"include": null,
"lang": null,
"length": 26,
"llama_tokens": 2543,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
program create_Hamming_window
! create a "pseudo plane wave" source file using tapering based on a Hamming window
! Dimitri Komatitsch, CNRS Marseille, France, February 2014,
! based on discussions with Paul Cristini, also from CNRS Marseille, France.
implicit none
! pi
double precision, parameter :: PI = 3.141592653589793d0
integer, parameter :: NSOURCES = 1000
! the plane wave will extend in the vertical direction from zmin to zmax
! and its amplitude in between will vary as a Hamming apodization window
double precision, parameter :: xs_center = -0.1d0
double precision, parameter :: zs_center = 0.07d0
double precision, parameter :: zs_size = 0.43d0
double precision, parameter :: zs_min = zs_center - zs_size/2.d0
double precision, parameter :: zs_max = zs_center + zs_size/2.d0
! angle in degrees by which we rotate the plane wave
double precision, parameter :: angle_rotate_source = -45.d0 * (PI / 180.d0)
double precision, parameter :: factor_max = 1.d10
integer :: isource
double precision :: x,hamming,xs,zs,xval,zval,xprime,zprime
do isource = 1,NSOURCES
! Hamming apodization window
! see e.g. http://docs.scipy.org/doc/numpy/reference/generated/numpy.hamming.html
! and http://www.mathworks.fr/fr/help/signal/ref/hamming.html
x = dble(isource - 1) / dble(NSOURCES - 1)
hamming = 0.54d0 - 0.46d0*cos(2*PI*x)
xs = xs_center
zs = zs_min + x * zs_size
! subtract the position of the center of rotation
xval = xs - xs_center
zval = zs - zs_center
! rotate it
xprime = xval*cos(angle_rotate_source) - zval*sin(angle_rotate_source)
zprime = xval*sin(angle_rotate_source) + zval*cos(angle_rotate_source)
! add the position of the center of rotation back
xprime = xprime + xs_center
zprime = zprime + zs_center
write(*,*) '# source ',isource
write(*,*) 'source_surf = .false.'
write(*,*) 'xs = ',xprime
write(*,*) 'zs = ',zprime
write(*,*) 'source_type = 1'
write(*,*) 'time_function_type = 1'
write(*,*) 'f0 = 1.d6'
write(*,*) 't0 = 0.d0'
write(*,*) 'angleforce = 0.0'
write(*,*) 'Mxx = 1.'
write(*,*) 'Mzz = 1.'
write(*,*) 'Mxz = 0.'
write(*,*) 'factor = ',factor_max * hamming
enddo
end program create_Hamming_window
| {
"alphanum_fraction": 0.600776699,
"author": null,
"avg_line_length": 34.7972972973,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "09e5d853c5733319524555077c149ed80ab622ed",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2022-01-15T21:48:35.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-12-15T02:04:58.000Z",
"max_forks_repo_head_hexsha": "2872dc514b638237771f4071195f7b8f90e0ce3d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "PanIGGCAS/SeisElastic2D_1.1",
"max_forks_repo_path": "specfem2d/utils/create_pressure_plane_wave_with_Hamming_window.f90",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "2872dc514b638237771f4071195f7b8f90e0ce3d",
"max_issues_repo_issues_event_max_datetime": "2021-05-27T09:36:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-10-31T03:36:34.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "PanIGGCAS/SeisElastic2D_1.1",
"max_issues_repo_path": "specfem2d/utils/create_pressure_plane_wave_with_Hamming_window.f90",
"max_line_length": 83,
"max_stars_count": 11,
"max_stars_repo_head_hexsha": "2872dc514b638237771f4071195f7b8f90e0ce3d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "PanIGGCAS/SeisElastic2D_1.1",
"max_stars_repo_path": "specfem2d/utils/create_pressure_plane_wave_with_Hamming_window.f90",
"max_stars_repo_stars_event_max_datetime": "2021-11-14T05:20:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-04T01:55:41.000Z",
"num_tokens": 711,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2575
} |
"""
bathy_smoother is a suite of tools for working with ROMS bathymetry.
(ripped from matlab script LP_bathymetry)
Requires:
NumPy (http://numpy.scipy.org)
lpsolve (http://lpsolve.sourceforge.net/)
Contains:
bathy_smoothing - Tools for smoothing the bathymetry
bathy_tools - Various bathymetry tools
LP_bathy_smoothing - Tools for smoothing the bathymetry using LP
LP_bathy_tools - LP tools for smoothing the bathymetry
LP_tools - Various LP tools
"""
doclines = __doc__.split("\n")
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('bathy_smoother',parent_package,top_path,
package_path='bathy_smoother')
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True)
# quiet=True)
define_macros = []
define_macros.append(('YY_NEVER_INTERACTIVE', None))
define_macros.append(('PARSER_LP', None))
define_macros.append(('INVERSE_ACTIVE', 'INVERSE_LUSOL'))
define_macros.append(('RoleIsExternalInvEngine', None))
sources = ['external/lp_solve_5.5/lp_MDO.c',
'external/lp_solve_5.5/shared/*.c',
'external/lp_solve_5.5/colamd/*.c',
'external/lp_solve_5.5/bfp/bfp_LUSOL/lp_LUSOL.c',
'external/lp_solve_5.5/bfp/bfp_LUSOL/LUSOL/lusol.c',
'external/lp_solve_5.5/ini.c',
'external/lp_solve_5.5/fortify.c',
'external/lp_solve_5.5/lp_rlp.c',
'external/lp_solve_5.5/lp_crash.c',
'external/lp_solve_5.5/lp_Hash.c',
'external/lp_solve_5.5/lp_lib.c',
'external/lp_solve_5.5/lp_wlp.c',
'external/lp_solve_5.5/lp_matrix.c',
'external/lp_solve_5.5/lp_mipbb.c',
'external/lp_solve_5.5/lp_MPS.c',
'external/lp_solve_5.5/lp_params.c',
'external/lp_solve_5.5/lp_presolve.c',
'external/lp_solve_5.5/lp_price.c',
'external/lp_solve_5.5/lp_pricePSE.c',
'external/lp_solve_5.5/lp_report.c',
'external/lp_solve_5.5/lp_scale.c',
'external/lp_solve_5.5/lp_simplex.c',
'external/lp_solve_5.5/lp_SOS.c',
'external/lp_solve_5.5/lp_utils.c',
'external/lp_solve_5.5/yacc_read.c'],
inc_dirs = ['external/lp_solve_5.5',
'external/lp_solve_5.5/bfp',
'external/lp_solve_5.5/bfp/bfp_LUSOL',
'external/lp_solve_5.5/bfp/bfp_LUSOL/LUSOL',
'external/lp_solve_5.5/colamd',
'external/lp_solve_5.5/shared'],
config.add_library('lpsolve55',
sources = sources,
include_dirs = inc_dirs,
macros=define_macros)
config.add_extension('lpsolve55',
sources = sources,
include_dirs = inc_dirs,
libraries = ['lpsolve55']
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(name = '',
version = '0.1',
description = doclines[0],
long_description = "\n".join(doclines[2:]),
url = 'https://github.com/ESMG/pyroms',
license = 'BSD',
platforms = ["any"],
configuration=configuration,
)
| {
"alphanum_fraction": 0.6076900838,
"author": null,
"avg_line_length": 37.597826087,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f469f44c68fb9d8923b0fc2ca312e20fd5b87b03",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-05-03T02:50:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-03T02:50:32.000Z",
"max_forks_repo_head_hexsha": "f02324a85be6c7694f83d2fb0ed0b27e869c89b9",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "NakamuraTakashi/pyroms",
"max_forks_repo_path": "bathy_smoother/setup.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f02324a85be6c7694f83d2fb0ed0b27e869c89b9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "NakamuraTakashi/pyroms",
"max_issues_repo_path": "bathy_smoother/setup.py",
"max_line_length": 68,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f02324a85be6c7694f83d2fb0ed0b27e869c89b9",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "NakamuraTakashi/pyroms",
"max_stars_repo_path": "bathy_smoother/setup.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-28T13:55:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-28T13:55:29.000Z",
"num_tokens": 921,
"path": null,
"reason": "from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3459
} |
subroutine percmacro
!! ~ ~ ~ PURPOSE ~ ~ ~
!! this subroutine computes percolation by crack flow
!! ~ ~ ~ INCOMING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! inflpcp |mm H2O |amount of precipitation that infiltrates
!! |into soil (enters soil)
!! ihru |none |HRU number
!! sol_fc(:,:) |mm H2O |amount of water available to plants in soil
!! |layer at field capacity (fc-wp)
!! sol_nly(:) |none |number of layers in soil profile
!! sol_st(:,:) |mm H2O |amount of water stored in the soil layer on
!! |any given day (less wilting point water)
!! sol_z(:,:) |mm |depth to bottom of soil layer
!! volcr(:,:) |mm |crack volume for soil layer
!! volcrmin |mm |minimum soil volume in profile
!! voltot |mm |total volume of cracks expressed as depth
!! |per unit area
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ OUTGOING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! crk |mm H2O |percolation due to crack flow
!! sepbtm(:) |mm H2O |percolation from bottom of soil profile for
!! |the day in HRU
!! sol_prk(:,:)|mm H2O |percolation storage array
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ LOCAL DEFINITIONS ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! crklch |none |fraction of bottom-layer crack water that
!! |leaves the profile (hard-coded 0.5)
!! j |none |HRU number
!! ly |none |counter (soil layer)
!! sepcrk |mm H2O |water entering cracks in soil
!! xx |mm H2O |water deficiency in soil layer
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ SUBROUTINES/FUNCTIONS CALLED ~ ~ ~
!! Intrinsic: Min
!! ~ ~ ~ ~ ~ ~ END SPECIFICATIONS ~ ~ ~ ~ ~ ~
use parm
integer :: j, ly
real :: crklch = 0.5, xx
j = 0
j = ihru
sepcrk = 0.
!! water entering the cracks cannot exceed the total crack volume nor
!! the amount of precipitation that actually infiltrated
sepcrk = Min(voltot, inflpcp)
!! sepcrktot records the initial crack inflow (presumably for
!! reporting; declared in module parm)
sepcrktot = sepcrk
if (sepcrk > 1.e-4) then
!! work upward from the bottom of the profile, filling each layer's
!! water deficit with the crack-flow water
do ly = sol_nly(j), 1, -1
crk = 0.
xx = 0.
if (ly == sol_nly(j)) then
!! bottom layer only: a crklch fraction of this layer's share of
!! the crack volume (above the minimum) percolates out of the
!! bottom of the profile
crk = crklch * (volcr(ly,j) / (sol_z(ly,j) - sol_z(ly-1,j))
& * voltot - volcrmin)
if (crk < sepcrk) then
sepcrk = sepcrk - crk
sepbtm(j) = sepbtm(j) + crk
sol_prk(ly,j) = sol_prk(ly,j) + crk
else
!! not enough crack water to satisfy crk: send it all out
sepbtm(j) = sepbtm(j) + sepcrk
sol_prk(ly,j) = sol_prk(ly,j) + sepcrk
sepcrk = 0.
end if
endif
!! xx = remaining storage deficit of this layer (field capacity
!! minus current storage); fill it from the crack water
xx = sol_fc(ly,j) - sol_st(ly,j)
if (xx > 0.) then
crk = Min(sepcrk, xx)
sol_st(ly,j) = sol_st(ly,j) + crk
sepcrk = sepcrk - crk
!! water absorbed here counts as percolation out of the layer above
if (ly /= 1) sol_prk(ly-1,j) = sol_prk(ly-1,j) + crk
end if
!! stop once the crack water is exhausted
if (sepcrk < 1.e-6) exit
end do
!! if soil layers filled and there is still water attributed to
!! crack flow, it is assumed to percolate out of bottom of profile
if (sepcrk > 1.e-4) then
sepbtm(j) = sepbtm(j) + sepcrk
sol_prk(sol_nly(j),j) = sol_prk(sol_nly(j),j) + sepcrk
end if
end if
return
end
"alphanum_fraction": 0.4072996562,
"author": null,
"avg_line_length": 39.8,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "a5cd5d08e9fabdd21940a96ecfbdb393add45015",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d2c3f6185acabf31440d613651192cdaabfe6b6c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gpignotti/swat_soil_moisture_sensitivity",
"max_forks_repo_path": "modified_source_code/percmacro.f",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "d2c3f6185acabf31440d613651192cdaabfe6b6c",
"max_issues_repo_issues_event_max_datetime": "2021-07-02T19:37:29.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-07-02T19:37:29.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gpignotti/swat_soil_moisture_sensitivity",
"max_issues_repo_path": "modified_source_code/percmacro.f",
"max_line_length": 77,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d2c3f6185acabf31440d613651192cdaabfe6b6c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gpignotti/swat_soil_moisture_sensitivity",
"max_stars_repo_path": "modified_source_code/percmacro.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1238,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3781
} |
! { dg-options "-fgraphite-identity -g -O3 -ffast-math" }
MODULE erf_fn
CONTAINS
! Reduced compiler regression test (gfortran.dg/graphite/pr42186.f90):
! only the loop-nest shape matters. Variables such as Y, THRESH, XNUM,
! XDEN, YSQ and X are implicitly typed and never initialized on purpose;
! the test is compile-only under -fgraphite-identity (see dg-options).
SUBROUTINE CALERF(ARG,RESULT,JINT)
DIMENSION A(5),B(4),C(9),D(8),P(6),Q(5)
! Skeleton of a rational-approximation erf kernel: accumulate the
! numerator/denominator polynomials, then form their quotient.
IF (Y <= THRESH) THEN
DO I = 1, 3
XNUM = (XNUM + A(I)) * YSQ
XDEN = (XDEN + B(I)) * YSQ
END DO
RESULT = X * (XNUM + A(4)) / (XDEN + B(4))
END IF
END SUBROUTINE CALERF
END MODULE erf_fn
| {
"alphanum_fraction": 0.540201005,
"author": null,
"avg_line_length": 26.5333333333,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "9e488f4f1893202df8649da8a85d9bf84e892b7b",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 19,
"max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z",
"max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "brugger1/testsuite",
"max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/graphite/pr42186.f90",
"max_issues_count": 24,
"max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517",
"max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "brugger1/testsuite",
"max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/graphite/pr42186.f90",
"max_line_length": 57,
"max_stars_count": 12,
"max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "brugger1/testsuite",
"max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/graphite/pr42186.f90",
"max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z",
"num_tokens": 163,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 398
} |
#!/usr/bin/env python
"""Demo: n-dimensional discrete wavelet transform on a 4-D array."""
import numpy
from supreme.lib import pywt

if __name__ == '__main__':
    import pprint

    # A 4-D cube of ones; dwtn decomposes along every axis, producing
    # 2**4 == sixteen 4-D coefficient arrays.
    cube = numpy.ones((4, 4, 4, 4))
    coefficients = pywt.dwtn(cube, 'db1')
    pprint.pprint(coefficients)
| {
"alphanum_fraction": 0.6733067729,
"author": null,
"avg_line_length": 22.8181818182,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a9add5b43f74a754dcc298f4fe9bbb3f8a56efb1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 34,
"max_forks_repo_forks_event_max_datetime": "2022-02-27T13:39:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-22T20:54:40.000Z",
"max_forks_repo_head_hexsha": "c296722599363bd0cbcce6877bd9de9b066cb74b",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "KirillDZR/supreme",
"max_forks_repo_path": "supreme/lib/pywt/demo/dwt_multidim.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "c296722599363bd0cbcce6877bd9de9b066cb74b",
"max_issues_repo_issues_event_max_datetime": "2019-09-23T22:47:10.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-10-23T15:13:34.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "KirillDZR/supreme",
"max_issues_repo_path": "supreme/lib/pywt/demo/dwt_multidim.py",
"max_line_length": 67,
"max_stars_count": 95,
"max_stars_repo_head_hexsha": "c296722599363bd0cbcce6877bd9de9b066cb74b",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "KirillDZR/supreme",
"max_stars_repo_path": "supreme/lib/pywt/demo/dwt_multidim.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-07T16:02:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-17T09:48:20.000Z",
"num_tokens": 79,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 251
} |
import numpy as np
import pytest
from alibi_detect.cd import LSDDDriftOnline
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF
# Reference-sample count and feature dimensionality for the synthetic data.
n, n_features = 100, 5
# Backend names to exercise: the mixed-case entry checks case-insensitive
# matching, and 'mxnet' is an unsupported backend (the test below expects
# the detector constructor to reject it with NotImplementedError).
tests_lsdddriftonline = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_lsdddriftonline)
@pytest.fixture
def lsdddriftonline_params(request):
    """Resolve an indirect index parameter to its backend name."""
    backend_index = request.param
    return tests_lsdddriftonline[backend_index]
@pytest.mark.parametrize('lsdddriftonline_params', list(range(n_tests)), indirect=True)
def test_lsdddriftonline(lsdddriftonline_params):
    """LSDDDriftOnline must dispatch to the backend matching its name
    (case-insensitively) and reject unknown backends."""
    backend_name = lsdddriftonline_params
    x_ref = np.random.randn(*(n, n_features))

    try:
        cd = LSDDDriftOnline(x_ref=x_ref, ert=25, window_size=5,
                             backend=backend_name, n_bootstraps=100)
    except NotImplementedError:
        # Unsupported backend: nothing was constructed.
        cd = None

    normalized = backend_name.lower()
    if normalized == 'pytorch':
        assert isinstance(cd._detector, LSDDDriftOnlineTorch)
    elif normalized == 'tensorflow':
        assert isinstance(cd._detector, LSDDDriftOnlineTF)
    else:
        assert cd is None
| {
"alphanum_fraction": 0.7542056075,
"author": null,
"avg_line_length": 31.4705882353,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7194a12153ec4a469a264ad86049b00de47b9cde",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "436e99efb88c922dc04cec97b0b52d88bf439d65",
"max_forks_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_forks_repo_name": "ojcobb/alibi-detect",
"max_forks_repo_path": "alibi_detect/cd/tests/test_lsdd_online.py",
"max_issues_count": 40,
"max_issues_repo_head_hexsha": "436e99efb88c922dc04cec97b0b52d88bf439d65",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T22:10:24.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-21T15:59:53.000Z",
"max_issues_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_issues_repo_name": "ojcobb/alibi-detect",
"max_issues_repo_path": "alibi_detect/cd/tests/test_lsdd_online.py",
"max_line_length": 99,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "436e99efb88c922dc04cec97b0b52d88bf439d65",
"max_stars_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_stars_repo_name": "ojcobb/alibi-detect",
"max_stars_repo_path": "alibi_detect/cd/tests/test_lsdd_online.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 288,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1070
} |
[STATEMENT]
lemma set_list_of_vec: "set (list_of_vec v) = set\<^sub>v v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (list_of_vec v) = set\<^sub>v v
[PROOF STEP]
unfolding vec_set_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (list_of_vec v) = ($) v ` {..<dim_vec v}
[PROOF STEP]
by transfer auto | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Jordan_Normal_Form_Matrix",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 147,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
*..............................................................
integer function ODS_StrSize ( String )
implicit NONE
!-------------------------------------------------------------------------
! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS !
!-------------------------------------------------------------------------
!
! !ROUTINE: ODS_StrSize
!
! !DESCRIPTION:
!
! Determines the index number of the last non-blank character
! in the input string. This number is defined as the length
! of string which may contain embedded blanks.
!
! !INTERFACE: StrSize = ODS_StrSize ( String )
!
! !INPUT PARAMETER:
character String * (*) ! The string to be examined
!
! !REVISION HISTORY:
! 17May1996 Redder Original version
! 01Nov1999 Redder Rewrote algorithm to match that for I90
!
!-------------------------------------------------------------------------
integer StrLen ! length of input string
integer StrSize ! temporary storage for output
integer iStr ! index variable for do loop
StrLen = len ( String )
StrSize = 1 ! Default for null string
! Scan backwards from the end; the first non-blank character found is
! the effective length. If every character is blank the loop falls
! through with StrSize = 1 and the decrement below yields 0.
do iStr = StrLen, 1, -1
StrSize = iStr
if ( String ( iStr:iStr ) .ne. ' ' ) go to 11
end do
StrSize = StrSize - 1
11 continue
ODS_StrSize = StrSize
return
end
| {
"alphanum_fraction": 0.4871244635,
"author": null,
"avg_line_length": 27.4117647059,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "599df1444fc5d9f392b1f4827469b6f82bb95ea0",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2022-03-11T16:26:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-07-05T18:00:44.000Z",
"max_forks_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf",
"max_forks_repo_licenses": [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
],
"max_forks_repo_name": "GEOS-ESM/AeroApps",
"max_forks_repo_path": "src/Shared/GMAO_Shared/GMAO_ods/ods_strsize.f",
"max_issues_count": 105,
"max_issues_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23",
"max_issues_repo_issues_event_max_datetime": "2022-03-22T02:12:16.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-07-08T19:27:23.000Z",
"max_issues_repo_licenses": [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
],
"max_issues_repo_name": "GEOS-ESM/GMAO_Shared",
"max_issues_repo_path": "GMAO_ods/ods_strsize.f",
"max_line_length": 74,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf",
"max_stars_repo_licenses": [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
],
"max_stars_repo_name": "GEOS-ESM/AeroApps",
"max_stars_repo_path": "src/Shared/GMAO_Shared/GMAO_ods/ods_strsize.f",
"max_stars_repo_stars_event_max_datetime": "2021-12-31T15:39:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-02T14:23:30.000Z",
"num_tokens": 329,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1398
} |
// #include <cstdint>
// #include <cstdio>
// #include <cstring>
// #include <iomanip>
// #include <iostream>
// #include <string>
// #include <vector>
#include <bitset>
#include <boost/format/format_fwd.hpp>
#include <boost/format/group.hpp>
#include <exception>
#include <ios>
#include <math.h>
#include <ostream>
#include <stdexcept>
#include <stdio.h>
// local libs
#include "args.hxx"
#include "boost/format.hpp"
const std::string version = "v1.0.1";
unsigned long long int hex_to_dec(std::string hex);
std::string hex_to_bin(std::string hex);
std::string dec_to_bin(unsigned long long int dec, std::string bin);
unsigned long long int bin_to_dec(std::string bin);
std::string bin_insert_space(std::string bin, int spacing_option);
std::string dec_to_bin_set_spacing(unsigned long long int dec);
// Entry point: parse the command line (args.hxx), then for every value
// supplied via -h/-d/-b print a hex / dec / binary breakdown, honouring
// the output-formatting flags.
// NOTE(review): printFullFlg ('-f'/"--full") is declared but never read
// below -- presumably a planned feature; confirm before removing.
int main(int argc, char **argv) {
  // --- CLI definition ---
  args::ArgumentParser parser("Numvert", "Author: David Ryan\n~ ALL TO HIM");
  // AtLeastOne: the parser rejects an invocation with no input flag.
  args::Group inputGroup(parser,
                         "Input types:", args::Group::Validators::AtLeastOne);
  args::Group outputFormatingGroup(
      parser, "Output formating options:", args::Group::Validators::DontCare);
  args::Group otherOptionsGroup(
      parser, "Other options:", args::Group::Validators::DontCare);
  args::HelpFlag help(parser, "help", "Display this help menu", {'?', "help"});
  args::ValueFlagList<std::string> hexadecimalInputFlg(
      inputGroup, "Hexadecimal",
      "Hexadecimal input, case insensitive (do not apply 0x prefix)",
      {'h', "hex"});
  args::ValueFlagList<unsigned long long int> decimalInputFlg(
      inputGroup, "Decimal", "Decimal input", {'d', "dec", 'i', "int"});
  args::ValueFlagList<std::string> binaryInputFlg(
      inputGroup, "Binary", "Binary input (do not apply 0b prefix)",
      {'b', "bin"});
  // args::ValueFlag<std::string> octalDeicmalInput(
  //     parser, "Octal Decimal", "Octal Decimal input", {'o', "oct"});
  /* Output Formating Group */
  args::Flag printFullFlg(outputFormatingGroup, "Print Full",
                          "Prints advanced output", {'f', "full"});
  args::ValueFlag<int> printBinSpaceFlg(
      outputFormatingGroup, "4 or 8",
      "Prints spaces in between every forth binary bit or every byte",
      {'s', "binary-spacing"});
  args::Flag truncateBinFlg(outputFormatingGroup, "Truncate Binary",
                            "Truncates binary output to least significant '1'",
                            {"tb", "truncate-binary"});
  args::Flag printCapHexFlg(outputFormatingGroup, "Print Hex with Cap",
                            "Prints all hexadecimal output with capital "
                            "letters (can not be used as input)",
                            {'H', "cap-hex"});
  /* Other options */
  args::Flag versionFlg(otherOptionsGroup, "Version",
                        "Prints version information", {"version"});
  // args::Flag signedBinInput(otherOptionsGroup, "Signed Binary Input", "Marks
  // all binary input as signed values", {"sb"}); // TODO: signed binary math
  // --help and malformed input short-circuit with the usage text.
  try {
    parser.ParseCLI(argc, argv);
  } catch (const args::Help &e) {
    std::cout << parser;
    return 0;
  } catch (args::ParseError &e) {
    std::cerr << e.what() << std::endl;
    std::cerr << parser;
    return 1;
  } catch (args::ValidationError &e) {
    std::cerr << e.what() << std::endl;
    std::cerr << parser;
    return 1;
  }
  // Banner, then one framed section per input value.
  std::cout << boost::format(
                   "+---------------------------------------numvert--+")
            << std::endl;
  /************************
   * HEXADECIMAL INPUT
   *************************/
  if (hexadecimalInputFlg) {
    for (std::string hex : args::get(hexadecimalInputFlg)) {
      // printf("[0x%s]\n\r", hex.c_str());
      if (printCapHexFlg) {
        // Round-trip through hex_to_dec so boost re-formats uppercase.
        std::cout << boost::format("[%1%]") %
                         boost::io::group(std::hex, std::showbase,
                                          std::uppercase, hex_to_dec(hex))
                  << std::endl;
        std::cout << boost::format("\tHEX : %1%") %
                         boost::io::group(std::hex, std::showbase,
                                          std::uppercase, hex_to_dec(hex))
                  << std::endl;
        // printf("\tHEX : 0x%llX\n\r", hex_to_dec(hex));
      } else {
        std::cout << boost::format("[%1%]") % boost::io::group(std::hex,
                                                               std::showbase,
                                                               hex_to_dec(hex))
                  << std::endl;
        std::cout << boost::format("\tHEX : %1%") %
                         boost::io::group(std::hex, std::showbase,
                                          hex_to_dec(hex))
                  << std::endl;
        // std::cout << boost::format("\tHEX : %1%") % hex << std::endl;
        // printf("\tHEX : 0x%s\n\r", hex.c_str());
      }
      std::cout << boost::format("\tDEC : %1%") %
                       boost::io::group(std::dec, hex_to_dec(hex))
                << std::endl;
      // printf("\tDEC : %llu\n\r", hex_to_dec(hex));
      if (printBinSpaceFlg) {
        // std::cout << boost::format("\tBIN : 0b%1%") %
        //                  bin_insert_space(hex_to_bin(hex),
        //                  args::get(printBinSpaceFlg))
        //     << std::endl;
        printf("\tBIN : 0b%s\n\r",
               bin_insert_space(hex_to_bin(hex), args::get(printBinSpaceFlg))
                   .c_str());
      } else {
        // std::cout << boost::format("\tBIN : 0b%1%") % hex_to_bin(hex)
        //           << std::endl;
        printf("\tBIN : 0b%s\n\r", hex_to_bin(hex).c_str());
      }
      std::cout << boost::format(
                       "+------------------------------------------------+")
                << std::endl;
      // printf("+------------------------------------------------+\n\r");
    }
  }
  /************************
   * DECIMAL INPUT
   *************************/
  if (decimalInputFlg) {
    for (unsigned long long int dec : decimalInputFlg) {
      std::cout << boost::format("[%1%]") % dec << std::endl;
      // printf("[%llu]\n\r", dec);
      if (printCapHexFlg) {
        std::cout << boost::format("\tHEX : %1%") %
                         boost::io::group(std::hex, std::showbase,
                                          std::uppercase, dec)
                  << std::endl;
        // printf("\tHEX : 0x%llX\n\r", dec);
      } else {
        std::cout << boost::format("\tHEX : %1%") %
                         boost::io::group(std::hex, std::showbase, dec)
                  << std::endl;
        // printf("\tHEX : 0x%llx\n\r", dec);
      }
      std::cout << boost::format("\tDEC : %1%") % dec << std::endl;
      // printf("\tDEC : %llu\n\r", dec);
      // Binary rendering: either truncated to the most significant '1'
      // or zero-padded to a multiple-of-four width.
      std::string bin;
      if (truncateBinFlg) // truncating bin option
      {
        bin = dec_to_bin(dec, "");
      } else {
        bin = dec_to_bin_set_spacing(dec);
      }
      if (printBinSpaceFlg) // bin spacing option
      {
        std::cout << boost::format("\tBIN : 0b%1%") %
                         bin_insert_space(bin, args::get(printBinSpaceFlg))
                  << std::endl;
        // printf("\tBIN : 0b%s\n\r",
        //        bin_insert_space(bin, args::get(printBinSpaceFlg)).c_str());
      } else {
        std::cout << boost::format("\tBIN : 0b%1%") % bin << std::endl;
        // printf("\tBIN : 0b%s\n\r", bin.c_str());
      }
      std::cout << boost::format(
                       "+------------------------------------------------+") << std::endl;
    }
  }
  /************************
   * BINARY INPUT
   *************************/
  if (binaryInputFlg) {
    for (std::string bin : args::get(binaryInputFlg)) {
      std::cout << boost::format("[0b%1%]") % bin << std::endl;
      // printf("[0b%s]\n\r", bin.c_str());
      if (printCapHexFlg) {
        std::cout << boost::format("\tHEX : %1%") %
                         boost::io::group(std::hex, std::showbase,
                                          std::uppercase, bin_to_dec(bin))
                  << std::endl;
        // printf("\tHEX : 0x%llX\n\r", bin_to_dec(bin));
      } else {
        std::cout << boost::format("\tHEX : %1%") %
                         boost::io::group(std::hex, std::showbase,
                                          bin_to_dec(bin))
                  << std::endl;
        // printf("\tHEX : 0x%llx\n\r", bin_to_dec(bin));
      }
      std::cout << boost::format("\tDEC : %1%") % bin_to_dec(bin) << std::endl;
      // printf("\tDEC : %llu\n\r", bin_to_dec(bin));
      std::cout << boost::format("\tBIN : 0b%1%") % bin << std::endl;
      // printf("\tBIN : 0b%s\n\r", bin.c_str());
      std::cout << boost::format(
                       "+------------------------------------------------+")
                << std::endl;
    }
  }
  if (versionFlg) {
    std::cout << boost::format("version: %1%") % version << std::endl;
    // printf("version: %s\n\r", version.c_str());
    std::cout << boost::format(
                     "+------------------------------------------------+")
              << std::endl;
  }
  return 0;
}
/**
 * Convert a hexadecimal string (no "0x" prefix, case insensitive) to a
 * binary digit string, four bits per hex digit.
 *
 * On an invalid character the function returns an error-message string
 * instead of a bit string (same contract as before); an empty input
 * yields an empty result.
 *
 * Fix: the old implementation terminated via `while (hex.at(i))` and a
 * catch-all around the loop -- exception-driven control flow that also
 * treated an embedded NUL byte as end-of-string. A plain loop over the
 * characters needs no exception handling.
 */
std::string hex_to_bin(std::string hex) {
  // Bit patterns indexed by nibble value.
  static const char *nibble[16] = {
      "0000", "0001", "0010", "0011", "0100", "0101", "0110", "0111",
      "1000", "1001", "1010", "1011", "1100", "1101", "1110", "1111"};
  std::string bin;
  for (char c : hex) {
    int value;
    if (c >= '0' && c <= '9') {
      value = c - '0';
    } else if (c >= 'a' && c <= 'f') {
      value = c - 'a' + 10;
    } else if (c >= 'A' && c <= 'F') {
      value = c - 'A' + 10;
    } else {
      // Same error text as before so callers/users see no change.
      std::string returnError =
          "ERROR: unable to parse Hex, invalid character: \'";
      returnError = returnError + c + '\'';
      return returnError;
    }
    bin.append(nibble[value]);
  }
  return bin;
}
// Parse a hexadecimal string (no "0x" prefix) into an unsigned 64-bit value.
unsigned long long int hex_to_dec(std::string hex) {
  const int hex_base = 16;
  return std::stoull(hex, nullptr, hex_base);
}
// Append the minimal (untruncated, no leading zeroes) binary rendering of
// `dec` to the prefix `bin` and return the result; 0 renders as "0".
std::string dec_to_bin(unsigned long long int dec, std::string bin) {
  std::string bits;
  unsigned long long int value = dec;
  do {
    // Prepend the next least-significant bit.
    bits.insert(bits.begin(), static_cast<char>('0' + (value & 1ULL)));
    value >>= 1;
  } while (value != 0);
  return bin + bits;
}
/**
 * Insert a space after every 4th (spacing_option == 4) or 8th
 * (spacing_option == 8) bit, counting groups from the least-significant
 * (rightmost) end of the bit string. Other option values return the
 * string unchanged.
 *
 * Fix: the old version also inserted a separator at bit offset 0 (the
 * very end of the string), so every spaced result carried a trailing
 * space.
 */
std::string bin_insert_space(std::string bin, int spacing_option) {
  int bits_to_right = 0; // bits to the right of the insertion point
  for (int i = bin.length(); i > 0; i--) {
    bool at_boundary = (spacing_option == 4 && (bits_to_right % 4) == 0) ||
                       (spacing_option == 8 && (bits_to_right % 8) == 0);
    // Skip offset 0: inserting there would append a trailing space.
    if (at_boundary && bits_to_right != 0) {
      bin.insert(i, " ");
    }
    bits_to_right++;
  }
  return bin;
}
// Parse a binary digit string (no "0b" prefix) into an unsigned 64-bit value.
unsigned long long int bin_to_dec(std::string bin) {
  const int binary_base = 2;
  return std::stoull(bin, nullptr, binary_base);
}
/**
 * Render `dec` as a zero-padded binary string whose width is the
 * smallest multiple of four bits (4..32, otherwise 64) that can
 * represent the value.
 *
 * Fixes: the boundary tests used `<=`, so exact powers of two were
 * silently truncated (16 -> "0000", 256 -> "00000000", ...); and the
 * 32-bit case was declared as std::bitset<432> -- a typo for <32> that
 * produced a 432-character string.
 */
std::string dec_to_bin_set_spacing(unsigned long long int dec) {
  // std::bitset needs a compile-time width, hence the explicit chain.
  if (dec < 16ULL) {
    return std::bitset<4>(dec).to_string();
  } else if (dec < 256ULL) {
    return std::bitset<8>(dec).to_string();
  } else if (dec < 4096ULL) {
    return std::bitset<12>(dec).to_string();
  } else if (dec < 65536ULL) {
    return std::bitset<16>(dec).to_string();
  } else if (dec < 1048576ULL) {
    return std::bitset<20>(dec).to_string();
  } else if (dec < 16777216ULL) {
    return std::bitset<24>(dec).to_string();
  } else if (dec < 268435456ULL) {
    return std::bitset<28>(dec).to_string();
  } else if (dec < 4294967296ULL) {
    return std::bitset<32>(dec).to_string();
  }
  return std::bitset<64>(dec).to_string();
}
// TODO: dec and hex to binary with set length | {
"alphanum_fraction": 0.4921292461,
"author": null,
"avg_line_length": 30.40302267,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "609ca2897132604510cc14c14437f105596c6ecc",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "64c72e6bb9fc4ad818690cd85a8d01e7aa950bd4",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dj-ryan/numvert",
"max_forks_repo_path": "src/numvert.cpp",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "64c72e6bb9fc4ad818690cd85a8d01e7aa950bd4",
"max_issues_repo_issues_event_max_datetime": "2021-08-05T03:58:23.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-08-05T03:58:23.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dj-ryan/numvert",
"max_issues_repo_path": "src/numvert.cpp",
"max_line_length": 79,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "64c72e6bb9fc4ad818690cd85a8d01e7aa950bd4",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dj-ryan/numvert",
"max_stars_repo_path": "src/numvert.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-01-06T12:19:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-30T03:52:10.000Z",
"num_tokens": 3185,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 12070
} |
'''
Write a short Python function that takes a positive integer n and returns
the sum of the squares of all the positive integers smaller than n.
'''
import numpy as np
def squaresum(a: int):
    """Return the sum of the squares of the non-negative integers below ``a``."""
    values = np.arange(a)
    return np.sum(values ** 2)
'''
Write a short Python function that takes a positive integer n and returns
the sum of the squares of all the odd positive integers smaller than n.
'''
def squaresumodds(a: int):
    """Return the sum of the squares of the odd positive integers below ``a``."""
    squares = np.arange(a) * np.arange(a)
    # A square is odd exactly when its low bit is set (i.e. the base is odd);
    # keep odd squares, zero out even ones.
    keep = [1 if value & 1 else 0 for value in squares]
    return np.sum(squares * np.array(keep))
'''
The sum of the squares of the first ten natural numbers is,
The square of the sum of the first ten natural numbers is,
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is .
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
'''
def sumsquaredif(a: int):
    """Return (square of the sum) - (sum of the squares) for 0..a-1."""
    total = np.sum(np.arange(a))
    square_of_sum = total * total
    return square_of_sum - squaresum(a)
def main():
    """Demo: run each helper for n = 101 and print the results."""
    limit = 101
    print('a vector: ', np.arange(limit))
    print('sum of squares of a: ', squaresum(limit))
    print('sum of squares of odds of a: ', squaresumodds(limit))
    print('difference between sum of squares: ', sumsquaredif(limit))
# Run the demo only when executed as a script (not on import).
if __name__=='__main__':
    main()
"alphanum_fraction": 0.6507304117,
"author": null,
"avg_line_length": 25.9655172414,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0535a2317c18294a96793e5f70d7a1410b3d50cc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f63a9f214750c5327cad792bfdcd3813b4659718",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "marchcarax/Exercises_python",
"max_forks_repo_path": "square_sum.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f63a9f214750c5327cad792bfdcd3813b4659718",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "marchcarax/Exercises_python",
"max_issues_repo_path": "square_sum.py",
"max_line_length": 118,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f63a9f214750c5327cad792bfdcd3813b4659718",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "marchcarax/Exercises_python",
"max_stars_repo_path": "square_sum.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 383,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1506
} |
#
# Packt Publishing
# Hands-on Tensorflow Lite for Intelligent Mobile Apps
# @author: Juan Miguel Valverde Martinez
#
# Section 2: Developing our first model in Tensorflow Lite
# Video 2-3: Parameter study
#
import numpy as np
import matplotlib.pyplot as plt
import random
import tensorflow as tf
import itertools
# Constants
NUM = 100       # number of samples in the synthetic dataset
b_size = 5      # batch size (reassigned per configuration in the run loop)
epochs = 500    # number of training epochs
n_times = 5     # repetitions per hyper-parameter configuration
# Functions
def generateNoise():
    '''Return a small random perturbation in the open interval (-0.25, 0.25).

    Draws one uniform sample to pick the sign and a second one for the
    magnitude, so the numpy random stream is consumed exactly as before.
    '''
    sign = 1 if np.random.random() > 0.5 else -1
    return (np.random.random() * sign) / 4
def realFun(p1, p2):
    '''Ground-truth target the model should learn: sin(p1) + 3 + cos(p2).'''
    shifted = np.sin(p1) + 3
    return shifted + np.cos(p2)
def noisyFun(p1, p2):
    '''Simulated noisy measurement: the clean target plus random noise.'''
    clean = realFun(p1, p2)
    return clean + generateNoise()
def shuffleData(data):
    '''Return ``data`` with its rows rearranged in a random order.'''
    order = list(range(data.shape[0]))
    random.shuffle(order)
    return data[order]
def generateSets(data,splits):
    ''' This function will generate Training, Testing and Validation sets given
        the data (numpy array), splits (list of percentages, i.e. [0.7,0.2,0.1]
        corresponding to 70% training, 20% testing and 10% validation data
        respectively

        NOTE(review): the subset sizes are computed from the global NUM,
        not from data.shape[0] -- this assumes data has exactly NUM rows;
        confirm at call sites.
    '''
    # Rows per subset. cut3 is computed but never used below (validation
    # takes whatever remains).
    cut1 = int(splits[0]*NUM)
    cut2 = int(splits[1]*NUM)
    cut3 = int(splits[2]*NUM)
    totalIndices = np.array([x for x in range(NUM)])
    # Training rows: cut1 indices spread evenly over the full range, each
    # taken from the middle of its stripe.
    indicesTraining = np.array([i*NUM//cut1 + NUM//(2*cut1) for i in range(cut1)])
    restIndices=np.array(sorted(list(set(totalIndices)-set(indicesTraining))))
    # Testing rows: evenly spaced positions within the remaining indices.
    indicesTesting = np.array([i*len(restIndices)//cut2 + len(restIndices)//(2*cut2) for i in range(cut2)])
    indicesTesting = restIndices[indicesTesting]
    # Validation rows: everything left over.
    indicesValidating = np.array(sorted(list(set(restIndices)-set(indicesTesting))))
    return shuffleData(data[indicesTraining,:]),shuffleData(data[indicesTesting,:]),shuffleData(data[indicesValidating,:])
def createNoiseVector():
    ''' Build a length-NUM vector that is zero everywhere except in two
        short windows, where the noise ramps up linearly (20%-25% of the
        samples: 0.2 -> 0.4, and 70%-73%: 0.2 -> 0.5).
    '''
    noiseVector = np.zeros(NUM)
    ramps = (
        (0.2, 0.25, 0.2, 0.4),
        (0.7, 0.73, 0.2, 0.5),
    )
    for start_frac, end_frac, low, high in ramps:
        initRange = int(NUM * start_frac)
        endRange = int(NUM * end_frac)
        noiseVector[initRange:endRange] = np.linspace(
            low, high, num=endRange - initRange)
    return noiseVector
def getTensorByName(name):
    """Return the TF global variable whose name equals ``name``, or None.

    Bug fix: the original returned the ``name`` string that was passed in
    rather than the matching variable ``t``, so callers could never obtain
    the tensor they asked for.
    """
    for t in tf.global_variables():
        if t.name == name:
            return t
    return None
def myLossFun(x, y):
    '''L1 loss: sum of absolute element-wise differences between x and y.'''
    residual = tf.subtract(x, y)
    return tf.reduce_sum(tf.abs(residual))
# --- OUR CODE ---
### PART 0: THE DATA ###
# Buffers, one entry per sample: clean targets, (p1, p2, measurement)
# triples, and a results placeholder.
realData = np.zeros(NUM)
noisyData = np.zeros((NUM,3))
results = np.zeros(NUM)
# Parameters
p1 = np.linspace(-2,2,num=NUM)
p2 = np.linspace(6,12,num=NUM)
# Evaluate the target function (clean and noisy) over the parameter grid.
for i in range(NUM):
    realData[i] = realFun(p1[i],p2[i])
    noisyData[i,:] = [p1[i],p2[i],noisyFun(p1[i],p2[i])]
# Superimpose the two localized noise bursts on the measurement column.
noisyData[:,-1]+=createNoiseVector()
### PART 1: MODEL ###
def createGraph(config):
    ''' Build the feed-forward TF1 graph for one hyper-parameter config.

    Args:
        config: tuple ``(init, loss, lr, batch_size)`` where ``init`` is
            "xavier" (Xavier/Glorot init) or anything else for
            random-normal init, ``loss`` is a callable ``loss(pred, y)``,
            ``lr`` is the Adam learning rate, and ``batch_size`` is
            ignored here (consumed by the training loop).

    Returns:
        Tuple ``(x, y, cost, optimizer, pred)`` of graph tensors/ops.

    The original repeated the layer-construction code three times
    (first layer, loop over hidden layers, output layer); it is now a
    single helper. Variable scope names ("layer1".."layer9") and
    variable names ("W", "b") are unchanged.
    '''
    init, loss, lr, _ = config
    # Network layout: 2 inputs, 8 sigmoid hidden layers, 1 linear output.
    Nodes = [2, 5, 10, 12, 14, 15, 12, 8, 5, 1]

    def _dense(inputs, n_in, n_out, scope_name, activate):
        # One dense layer inside its own variable scope; optional sigmoid.
        with tf.variable_scope(scope_name):
            if init == "xavier":
                W = tf.get_variable("W", shape=[n_in, n_out],
                                    initializer=tf.contrib.layers.xavier_initializer())
            else:
                W = tf.get_variable("W", initializer=tf.random_normal([n_in, n_out]))
            b = tf.get_variable("b", initializer=tf.zeros([n_out]))
            out = tf.add(tf.matmul(inputs, W), b)
            return tf.nn.sigmoid(out) if activate else out

    # tf Graph input
    x = tf.placeholder("float", [None, Nodes[0]])
    y = tf.placeholder("float", [None, Nodes[-1]])
    # Hidden layers 1..8 with sigmoid activation.
    layer = x
    for i in range(8):
        layer = _dense(layer, Nodes[i], Nodes[i + 1], "layer" + str(i + 1), activate=True)
    # Output layer with linear activation.
    pred = _dense(layer, Nodes[-2], Nodes[-1], "layer9", activate=False)
    # Define loss and optimizer.
    cost = loss(pred, y)
    optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)
    return x, y, cost, optimizer, pred
# Hyper-parameter grid: weight-initialisation scheme, loss function,
# learning rate and mini-batch size.
initP = ["xavier", "random"]
lossesP = [tf.losses.absolute_difference, tf.losses.hinge_loss, myLossFun]
lrP = np.linspace(0.0001, 0.1, 10)
batchP = range(3, 7)
parameters = [initP, lossesP, lrP, batchP]
# Cartesian product: one tuple per combination of the four lists above.
allConfgs = list(itertools.product(*parameters))
### PART 2: RUN THE MODEL ###
# Launch the graph: build, train and test a fresh network for every
# hyper-parameter configuration, repeating n_times per configuration,
# and append the best noisy-data test error to the "logRes" file.
counter = 0
for conf in allConfgs:
    counter += 1
    print(counter, len(allConfgs))
    x, y, cost, optimizer, pred = createGraph(conf)
    b_size = conf[-1]
    # Best (lowest) noisy test error across repetitions. float('inf')
    # replaces the old magic sentinel 999, which would silently skip
    # logging if every repetition's error exceeded 999.
    lowestError = float('inf')
    for times in range(n_times):
        with tf.Session() as sess:
            # Initializing the variables
            sess.run(tf.global_variables_initializer())
            # Generating datasets
            trainingData, testingData, validationData = generateSets(noisyData, [0.7, 0.2, 0.1])
            for e in range(epochs):
                # Training cycle: mini-batches of b_size consecutive rows.
                for i in range(0, trainingData.shape[0], b_size):
                    x_raw = trainingData[i:i + b_size, :-1]
                    y_raw = trainingData[i:i + b_size, -1]
                    y_raw = np.reshape(y_raw, (y_raw.shape[0], 1))
                    # Run optimization op (backprop) and cost op (to get loss value)
                    _, c = sess.run([optimizer, cost], feed_dict={x: x_raw, y: y_raw})
            # Testing cycle: accumulate absolute errors against both the
            # noisy and the clean target functions.
            tmpErrorNoisy = 0
            tmpError = 0
            for i in range(testingData.shape[0]):
                x_raw = np.reshape(testingData[i, :-1], (1, 2))
                prediction = pred.eval({x: x_raw})
                tmpErrorNoisy += abs(noisyFun(x_raw[0, 0], x_raw[0, 1]) - prediction)
                tmpError += abs(realFun(x_raw[0, 0], x_raw[0, 1]) - prediction)
            if tmpErrorNoisy < lowestError:
                lowestError = tmpErrorNoisy
    # conf[1] is the loss callable. ``__name__`` works on both Python 2
    # and 3; the original used ``.func_name``, which Python 3 removed.
    # ``with`` guarantees the log file is closed even on write errors.
    with open("logRes", "a") as f:
        f.write("{0},{1},{2},{3},{4}\n".format(conf[0], conf[1].__name__, conf[2], conf[3], float(lowestError)))
    # Clear the graph so the next configuration starts from scratch.
    tf.reset_default_graph()
| {
"alphanum_fraction": 0.6950929014,
"author": null,
"avg_line_length": 28.3648648649,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "06ac9d91384e6dbc71ab4e7a5125d6324c283aae",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 17,
"max_forks_repo_forks_event_max_datetime": "2021-03-22T03:24:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-03-12T11:59:03.000Z",
"max_forks_repo_head_hexsha": "8ac40d546dbe3da9aed0557e10169d31f13b8351",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Valmach/Android-Tensorflow-Lite-Intelligent-Visual-Mobile",
"max_forks_repo_path": "Section02/2-3 Parameter study.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "8ac40d546dbe3da9aed0557e10169d31f13b8351",
"max_issues_repo_issues_event_max_datetime": "2019-11-20T07:59:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-11-20T07:59:12.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Valmach/Android-Tensorflow-Lite-Intelligent-Visual-Mobile",
"max_issues_repo_path": "Section02/2-3 Parameter study.py",
"max_line_length": 119,
"max_stars_count": 21,
"max_stars_repo_head_hexsha": "8ac40d546dbe3da9aed0557e10169d31f13b8351",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Valmach/Android-Tensorflow-Lite-Intelligent-Visual-Mobile",
"max_stars_repo_path": "Section02/2-3 Parameter study.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-08T13:12:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-03-31T06:41:45.000Z",
"num_tokens": 1887,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6297
} |
#' @export
accel.est <- function (data) {
    # Per-stratum ingredients of the BCa acceleration constant:
    #   row 1 = sum of cubed jackknife deviations,
    #   row 2 = sum of squared jackknife deviations.
    n.strata <- length(unique(data$Strata))
    out <- matrix(NA, 2, n.strata)
    for (j in seq_len(n.strata)) {
        # Leave-one-out (jackknife) means for stratum j.
        jack <- sapply(seq_len(data$nh[j]),
                       function(i) mean(data$yhi[[j]][-i]))
        dev <- mean(jack) - jack
        out[1, j] <- sum(dev^3)
        out[2, j] <- sum(dev^2)
    }
    return(out)
}
| {
"alphanum_fraction": 0.464953271,
"author": null,
"avg_line_length": 26.75,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "34a53381300e2484b6be812e4ed338b43f5a530c",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cbca5a4c5c3bd68fd0885c1b84d9fd940ccf9d17",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "AMCOOK/bio.survey",
"max_forks_repo_path": "R/accel.est.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cbca5a4c5c3bd68fd0885c1b84d9fd940ccf9d17",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "AMCOOK/bio.survey",
"max_issues_repo_path": "R/accel.est.r",
"max_line_length": 45,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cbca5a4c5c3bd68fd0885c1b84d9fd940ccf9d17",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "AMCOOK/bio.survey",
"max_stars_repo_path": "R/accel.est.r",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 145,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 428
} |
import numpy as np
import grizzly.numpy_weld_impl as numpy_weld_impl
from grizzly.lazy_op import LazyOpResult
from weld.weldobject import *
class NumpyArrayWeld(LazyOpResult):
    """Lazy Weld wrapper around a NumPy-like array expression.

    Attributes:
        expr: underlying Weld expression for the array.
        weld_type: Weld element type of the expression.
        dim: dimensionality of the result (0 for scalar aggregations).
    """

    def __init__(self, expr, weld_type, dim=1):
        """Store the Weld expression, its element type and dimensionality.

        Args:
            expr: Weld expression to wrap.
            weld_type: Weld element type of the expression.
            dim (int, optional): dimensionality of the result.
        """
        self.expr = expr
        self.weld_type = weld_type
        self.dim = dim

    def __div__(self, other):
        """Element-wise division by ``other``.

        Args:
            other: divisor; a LazyOpResult is unwrapped to its expression.

        Returns:
            NumpyArrayWeld: lazy result of the division.
        """
        if isinstance(other, LazyOpResult):
            other = other.expr
        return NumpyArrayWeld(
            numpy_weld_impl.div(
                self.expr,
                other,
                self.weld_type
            ),
            self.weld_type
        )

    # Bug fix: Python 3 dispatches the ``/`` operator to __truediv__, not
    # __div__, so division silently failed under Python 3. Aliasing keeps
    # Python 2 behaviour intact while enabling Python 3.
    __truediv__ = __div__

    def sum(self):
        """Sum all elements of the array.

        Returns:
            NumpyArrayWeld: lazy scalar (dim=0) "+"-aggregation.
        """
        return NumpyArrayWeld(
            numpy_weld_impl.aggr(
                self.expr,
                "+",
                0,
                self.weld_type
            ),
            self.weld_type,
            0
        )
def dot(matrix, vector):
    """
    Computes the dot product between a matrix and a vector.
    TODO: Make this more generic

    Args:
        matrix (TYPE): Description
        vector (TYPE): Description
    """
    def _unwrap(operand):
        # Resolve an operand to (weld expression, weld type). Lazy results
        # expose both directly; ndarrays get their type from the dtype map;
        # anything else passes through with an unknown (None) type.
        if isinstance(operand, LazyOpResult):
            return operand.expr, operand.weld_type
        if isinstance(operand, np.ndarray):
            weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[
                str(operand.dtype)]
            return operand, weld_type
        return operand, None

    matrix, matrix_weld_type = _unwrap(matrix)
    vector, vector_weld_type = _unwrap(vector)
    return NumpyArrayWeld(
        numpy_weld_impl.dot(
            matrix,
            vector,
            matrix_weld_type,
            vector_weld_type),
        WeldDouble())
def exp(vector):
    """
    Computes a per-element exponent of the passed-in vector.

    Args:
        vector (TYPE): Description
    """
    weld_type = None
    if isinstance(vector, LazyOpResult):
        # Unwrap a lazy operand into its raw expression and type.
        weld_type, vector = vector.weld_type, vector.expr
    elif isinstance(vector, np.ndarray):
        weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[str(vector.dtype)]
    result_expr = numpy_weld_impl.exp(vector, weld_type)
    return NumpyArrayWeld(result_expr, WeldDouble())
| {
"alphanum_fraction": 0.5733380232,
"author": null,
"avg_line_length": 24.2991452991,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "82b904479c9b3850c9824937d64953dae099c265",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d29968e8493dcd8d4a5cce17eac9dd77a651d3af",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "errord/weld",
"max_forks_repo_path": "python/grizzly/grizzly/numpy_weld.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d29968e8493dcd8d4a5cce17eac9dd77a651d3af",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "errord/weld",
"max_issues_repo_path": "python/grizzly/grizzly/numpy_weld.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d29968e8493dcd8d4a5cce17eac9dd77a651d3af",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "errord/weld",
"max_stars_repo_path": "python/grizzly/grizzly/numpy_weld.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 619,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2843
} |
/*
==============================================================================
KratosStructuralApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
janosch.stascheit@rub.de
nagel@sd.rub.de
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
- Ruhr-University Bochum, Institute for Structural Mechanics, Germany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: anonymous $
// Date: $Date: 2008-10-23 14:27:01 $
// Revision: $Revision: 1.20 $
//
//
#if !defined(KRATOS_ADD_CUSTOM_UTILITIES_TO_PYTHON_H_INCLUDED )
#define KRATOS_ADD_CUSTOM_UTILITIES_TO_PYTHON_H_INCLUDED
// System includes
#include <boost/python.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
// External includes
#include "boost/smart_ptr.hpp"
// Project includes
#include "custom_python/add_custom_utilities_to_python.h"
#include "includes/define.h"
#include "custom_utilities/deactivation_utility.h"
#include "custom_utilities/variable_transfer_utility.h"
#ifdef _OPENMP
#include "custom_utilities/parallel_variable_transfer_utility.h"
#endif
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
#include "custom_utilities/contact_utility.h"
#include "custom_utilities/volume_utility.h"
#include "custom_utilities/restart_utility.h"
#include "custom_utilities/node_snapping_utility.h"
#include "custom_elements/rigid_body_3D.h"
#include "custom_utilities/output_utility.h"
#include "custom_utilities/dof_utility.h"
#include "custom_utilities/smoothing_utility.h"
//#include "custom_utilities/detect_elements_utility.h"
#include "custom_utilities/intra_fracture_triangle_utility.h"
#include "custom_utilities/inter_fracture_triangle_utility.h"
#include "custom_utilities/inter_fracture_tetrahedra_utility.h"
//#include "custom_utilities/mark_element_for_refinement.h"
#include "custom_utilities/disconnect_utility.h"
namespace Kratos
{
namespace Python
{
using namespace boost::python;
void AddNewRigidBody3D( ModelPart& structural_model_part,
ModelPart& skin_model_part,
Variable<double>& rSelectionVariable,
double selection_value,
Node<3>::Pointer CenterNode,
Element::PropertiesType::Pointer pProperties,
double nodal_mass,
Matrix& Inertia
)
{
Geometry<Node<3> >::Pointer skin_nodes_geometry( new Geometry<Node<3> > ); ;
//selecting the nodes in the model part having rSelectionVariable==selection_value
for ( ModelPart::NodesContainerType::iterator it = skin_model_part.NodesBegin(); it != skin_model_part.NodesEnd(); it++ )
{
if ( it->FastGetSolutionStepValue( rSelectionVariable ) == selection_value )
skin_nodes_geometry->push_back( *( it.base() ) );
}
//creating a geometry containing the center node
Geometry<Node<3> >::Pointer center_node_geometry( new Geometry<Node<3> > ) ;
center_node_geometry->push_back( Node<3>::Pointer( CenterNode ) );
unsigned int last_id = 1;
if ( structural_model_part.Elements().size() != 0 )
last_id = ( structural_model_part.ElementsEnd() - 1 )->Id() + 1;
array_1d<double, 3> zero = ZeroVector( 3 );
Element::Pointer new_el = RigidBody3D::Pointer( new RigidBody3D( last_id,
center_node_geometry,
pProperties,
skin_nodes_geometry,
nodal_mass,
Inertia, zero, zero ) );
structural_model_part.Elements().push_back(
new_el
);
}
/**
 * Create a RigidBody3D element supported by translational and rotational
 * springs and append it to structural_model_part.
 *
 * The rigid body is assembled from every node of skin_model_part whose
 * rSelectionVariable equals selection_value; CenterNode becomes the
 * element's reference (center) node. The new element id is one past the
 * largest existing element id (or 1 for an empty model part).
 */
void AddNewRigidBodyAndSpring3D( ModelPart& structural_model_part,
                                 ModelPart& skin_model_part,
                                 Variable<double>& rSelectionVariable,
                                 double selection_value,
                                 Node<3>::Pointer CenterNode,
                                 Element::PropertiesType::Pointer pProperties,
                                 double nodal_mass,
                                 Matrix& Inertia,
                                 array_1d<double, 3>& translational_stiffness,
                                 array_1d<double, 3>& rotational_stiffness
                               )
{
    Geometry<Node<3> >::Pointer skin_nodes_geometry( new Geometry<Node<3> > ); ;
    //selecting the nodes in the model part having rSelectionVariable==selection_value
    for ( ModelPart::NodesContainerType::iterator it = skin_model_part.NodesBegin(); it != skin_model_part.NodesEnd(); it++ )
    {
        if ( it->FastGetSolutionStepValue( rSelectionVariable ) == selection_value )
            skin_nodes_geometry->push_back( *( it.base() ) );
    }
    //creating a geometry containing the center node
    Geometry<Node<3> >::Pointer center_node_geometry( new Geometry<Node<3> > ) ;
    center_node_geometry->push_back( Node<3>::Pointer( CenterNode ) );
    // Next free element id: largest existing id + 1, or 1 when empty.
    unsigned int last_id = 1;
    if ( structural_model_part.Elements().size() != 0 )
        last_id = ( structural_model_part.ElementsEnd() - 1 )->Id() + 1;
    // NOTE(review): 'zero' is unused here — leftover from the no-spring
    // variant, which passes it as both stiffness arguments.
    array_1d<double, 3> zero = ZeroVector( 3 );
    Element::Pointer new_el = RigidBody3D::Pointer( new RigidBody3D( last_id,
                              center_node_geometry,
                              pProperties,
                              skin_nodes_geometry,
                              nodal_mass,
                              Inertia,
                              translational_stiffness,
                              rotational_stiffness ) );
    structural_model_part.Elements().push_back(
        new_el
    );
}
// Free-function wrapper that pins the Variable<double> overload of
// VariableTransferUtility::TransferVariablesToNodes, so both overloads can
// be exported to Python under the same name (see AddCustomUtilitiesToPython).
void DoubleTransferVariablesToNodes(VariableTransferUtility& dummy,
        ModelPart& model_part, Variable<double>& rThisVariable)
{
    dummy.TransferVariablesToNodes(model_part, rThisVariable);
}
// Free-function wrapper that pins the Variable<Vector> overload of
// VariableTransferUtility::TransferVariablesToNodes for the Python export.
void VectorTransferVariablesToNodes(VariableTransferUtility& dummy,
        ModelPart& model_part, Variable<Vector>& rThisVariable)
{
    dummy.TransferVariablesToNodes(model_part, rThisVariable);
}
// Free-function wrapper that pins the Variable<double> overload of
// VariableTransferUtility::TransferVariablesToGaussPoints for the export.
void DoubleTransferVariablesToGaussPoints(VariableTransferUtility& dummy,
        ModelPart& source_model_part, ModelPart& target_model_part, Variable<double>& rThisVariable)
{
    dummy.TransferVariablesToGaussPoints(source_model_part, target_model_part, rThisVariable);
}
// Free-function wrapper that pins the Variable<Vector> overload of
// VariableTransferUtility::TransferVariablesToGaussPoints for the export.
void VectorTransferVariablesToGaussPoints(VariableTransferUtility& dummy,
        ModelPart& source_model_part, ModelPart& target_model_part, Variable<Vector>& rThisVariable)
{
    dummy.TransferVariablesToGaussPoints(source_model_part, target_model_part, rThisVariable);
}
/**
 * Register the structural-application custom utilities with Boost.Python.
 *
 * Changes: removed a duplicate registration of
 * Smoothing_Utility::SettingNodalValues (the same member was .def'ed twice
 * with an identical signature) and a stray empty statement (';') after the
 * free-function defs.
 */
void AddCustomUtilitiesToPython()
{
    class_<DeactivationUtility, boost::noncopyable >
    ( "DeactivationUtility", init<>() )
    .def( init<int>() )
    .def( "Deactivate", &DeactivationUtility::Deactivate )
    .def( "Reactivate", &DeactivationUtility::Reactivate )
    .def( "ReactivateStressFree", &DeactivationUtility::ReactivateStressFree )
    .def( "ReactivateAll", &DeactivationUtility::ReactivateAll )
    .def( "Initialize", &DeactivationUtility::Initialize )
    .def( "GetName", &DeactivationUtility::GetName<Element> )
    .def( "GetName", &DeactivationUtility::GetName<Condition> )
    ;
    class_<VariableTransferUtility, boost::noncopyable >
    ( "VariableTransferUtility", init<>() )
    .def(init<VariableTransferUtility::LinearSolverType::Pointer>())
    .def( "TransferNodalVariables", &VariableTransferUtility::TransferNodalVariables )
    .def( "TransferConstitutiveLawVariables", &VariableTransferUtility::TransferConstitutiveLawVariables )
    .def( "TransferInSituStress", &VariableTransferUtility::TransferInSituStress )
    .def( "TransferPrestress", &VariableTransferUtility::TransferPrestress )
    .def( "TransferPrestressIdentically", &VariableTransferUtility::TransferPrestressIdentically )
    .def( "TransferSpecificVariable", &VariableTransferUtility::TransferSpecificVariable )
    .def( "InitializeModelPart", &VariableTransferUtility::InitializeModelPart )
    // Overload pairs resolved by the free-function wrappers above.
    .def("TransferVariablesToNodes", &DoubleTransferVariablesToNodes)
    .def("TransferVariablesToNodes", &VectorTransferVariablesToNodes)
    .def("TransferVariablesToGaussPoints", &DoubleTransferVariablesToGaussPoints)
    .def("TransferVariablesToGaussPoints", &VectorTransferVariablesToGaussPoints)
    ;
#ifdef _OPENMP
    class_<ParallelVariableTransferUtility, boost::noncopyable >
    ( "ParallelVariableTransferUtility",
      init<>() )
    .def( "TransferNodalVariables", &ParallelVariableTransferUtility::TransferNodalVariables )
    .def( "TransferConstitutiveLawVariables", &ParallelVariableTransferUtility::TransferConstitutiveLawVariables )
    .def( "TransferInSituStress", &ParallelVariableTransferUtility::TransferInSituStress )
    .def( "InitializeModelPart", &ParallelVariableTransferUtility::InitializeModelPart )
    ;
#endif
    class_<ContactUtility, boost::noncopyable >
    ( "ContactUtility",
      init<int>() )
    .def( "SetUpContactConditions", &ContactUtility::SetUpContactConditions )
    .def( "SetUpContactConditionsLagrangeTying", &ContactUtility::SetUpContactConditionsLagrangeTying )
    .def( "Update", &ContactUtility::Update )
    .def( "IsConverged", &ContactUtility::IsConverged )
    .def( "Clean", &ContactUtility::Clean )
    .def( "CleanLagrangeTying", &ContactUtility::CleanLagrangeTying )
    ;
    // VM
    class_<VolumeUtility, boost::noncopyable >
    ( "VolumeUtility",
      init<int>() )
    .def( "Calculate_this_Volume", &VolumeUtility::CalculateVolume ) // VM
    ;
    //VM
    class_<RestartUtility, boost::noncopyable >
    ( "RestartUtility",
      init< std::string const& >() )
    .def( "ChangeFileName", &RestartUtility::ChangeFileName )
    .def( "StoreNodalVariables", &RestartUtility::StoreNodalVariables )
    .def( "WriteNodalVariables", &RestartUtility::WriteNodalVariables )
    .def( "StoreInSituStress", &RestartUtility::StoreInSituStress )
    .def( "WriteConstitutiveLawVariables", &RestartUtility::WriteConstitutiveLawVariables )
    .def( "StoreConstitutiveLawVariables", &RestartUtility::StoreConstitutiveLawVariables )
    .def( "WriteInSituStress", &RestartUtility::WriteInSituStress )
    ;
    class_<NodeSnappingUtility, boost::noncopyable >
    ( "NodeSnappingUtility",
      init<>() )
    .def( "MoveNode", &NodeSnappingUtility::MoveNode )
    .def( "AdjustNodes", &NodeSnappingUtility::AdjustNodes )
    .def( "AdjustToCircle", &NodeSnappingUtility::AdjustToCircle )
    .def( "AdjustToCylinder", &NodeSnappingUtility::AdjustToCylinder )
    .def( "AdjustToClosedCylinder", &NodeSnappingUtility::AdjustToClosedCylinder )
    .def( "IdentifyInsideElements", &NodeSnappingUtility::IdentifyInsideElements )
    .def( "SetInsituStress", &NodeSnappingUtility::SetInsituStress )
    .def( "ExtractCapNodes", &NodeSnappingUtility::ExtractCapNodes )
    .def( "TestElements", &NodeSnappingUtility::TestElements )
    ;
    class_<OutputUtility, boost::noncopyable >
    ( "OutputUtility",
      init<>() )
    .def( "GetStrain", &OutputUtility::GetStrain )
    .def( "GetStress", &OutputUtility::GetStress )
    .def( "GetInternalVariables", &OutputUtility::GetInternalVariables )
    ;
    // Free-function exports (rigid-body construction helpers).
    def( "AddNewRigidBody3D", AddNewRigidBody3D );
    def( "AddNewRigidBodyAndSpring3D", AddNewRigidBodyAndSpring3D );
    /*
    class_<Detect_Elements_And_Nodes, boost::noncopyable >
    ("DetectElementsAndNodes", init<ModelPart&, int >() )
    .def("DetectNode", &Detect_Elements_And_Nodes::Detect_Node_To_Be_Splitted)
    .def("DetectElements", &Detect_Elements_And_Nodes::Detect_Elements_To_Be_Splitted)
    .def("CalculateMapFailure", &Detect_Elements_And_Nodes::Calculate_Map_Failure)
    .def("Finalize", &Detect_Elements_And_Nodes::Finalize)
    ;
    */
    class_<Smoothing_Utility, boost::noncopyable >
    ( "SmoothingUtility", init<ModelPart&, int >() )
    .def( "WeightedRecoveryGradients", &Smoothing_Utility::WeightedRecoveryGradients<double> )
    .def( "WeightedRecoveryGradients", &Smoothing_Utility::WeightedRecoveryGradients<Matrix> ) // for matrices
    .def( "InterpolatedRecoveryGradients", &Smoothing_Utility::InterpolatedRecoveryGradients<Matrix> )
    .def( "SettingNodalValues", &Smoothing_Utility::SettingNodalValues )
    .def( "RecomputeValuesForNewMesh", &Smoothing_Utility::Recompute_Values_For_New_Mesh )
    .def( "Finalize", &Smoothing_Utility::Finalize )
    ;
    class_<Disconnect_Triangle_Utilities, boost::noncopyable >
    ( "DisconnectTriangle", init<ModelPart&>() )
    .def( "DisconnectElements", &Disconnect_Triangle_Utilities::Disconnect_Elements )
    ;
    class_<Intra_Fracture_Triangle, boost::noncopyable >
    ( "IntraFractureTriangle", init<ModelPart&, int >() )
    .def( "DetectAndSplitElements", &Intra_Fracture_Triangle::Detect_And_Split_Elements )
    ;
    class_<Inter_Fracture_Triangle, boost::noncopyable >
    ( "InterFractureTriangle", init<ModelPart&, int >() )
    .def( "DetectAndSplitElementsHeuristicFormula", &Inter_Fracture_Triangle::Detect_And_Split_Elements_Heuristic_Formula )
    .def( "DetectAndSplitElements", &Inter_Fracture_Triangle::Detect_And_Split_Elements )
    .def( "Finalize", &Inter_Fracture_Triangle::Finalize )
    ;
    class_<Inter_Fracture_Tetrahedra, boost::noncopyable >
    ( "InterFractureTetrahedra", init<ModelPart&, int >() )
    .def( "DetectAndSplitElements", &Inter_Fracture_Tetrahedra::Detect_And_Split_Elements )
    ;
    class_<DofUtility, boost::noncopyable >
    ( "DofUtility", init<>() )
    .def( "ListDofs", &DofUtility::ListDofs )
    ;
}
} // namespace Python.
} // namespace Kratos.
#endif // KRATOS_ADD_CUSTOM_UTILITIES_TO_PYTHON_H_INCLUDED defined
| {
"alphanum_fraction": 0.6924210391,
"author": null,
"avg_line_length": 41.9596774194,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "c10aed5578d72accac67c44f6762a92e9bac30f5",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-06-12T08:51:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-12T08:51:24.000Z",
"max_forks_repo_head_hexsha": "e977752722e8ef1b606f25618c4bf8fd04c434cc",
"max_forks_repo_licenses": [
"BSD-4-Clause"
],
"max_forks_repo_name": "AndreaVoltan/MyKratos7.0",
"max_forks_repo_path": "applications/structural_application/custom_python/add_custom_utilities_to_python.cpp",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "e977752722e8ef1b606f25618c4bf8fd04c434cc",
"max_issues_repo_issues_event_max_datetime": "2020-05-02T14:22:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-30T19:19:09.000Z",
"max_issues_repo_licenses": [
"BSD-4-Clause"
],
"max_issues_repo_name": "AndreaVoltan/MyKratos7.0",
"max_issues_repo_path": "applications/structural_application/custom_python/add_custom_utilities_to_python.cpp",
"max_line_length": 125,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "e977752722e8ef1b606f25618c4bf8fd04c434cc",
"max_stars_repo_licenses": [
"BSD-4-Clause"
],
"max_stars_repo_name": "AndreaVoltan/MyKratos7.0",
"max_stars_repo_path": "applications/structural_application/custom_python/add_custom_utilities_to_python.cpp",
"max_stars_repo_stars_event_max_datetime": "2021-04-14T19:40:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-04-30T19:13:08.000Z",
"num_tokens": 3728,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 15609
} |
#!/usr/bin/python
# -*- coding:utf8 -*-
from unittest import TestCase
import numpy as np
import math
import Control_Exp1001 as CE
import os
import json
from Control_Exp1001.demo.thickener.run import vi_compare_sample
class TestVi_compare_sample(TestCase):
    """Smoke test for the vi_compare_sample demo entry point."""

    def test_vi_compare_sample(self):
        # No assertions: the test passes as long as the full run
        # completes without raising an exception.
        vi_compare_sample()
| {
"alphanum_fraction": 0.7791411043,
"author": null,
"avg_line_length": 19.1764705882,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bb14aa31315b1ec170efcff9c9f4104f6b8f89db",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-09-15T14:33:40.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-09-15T14:33:40.000Z",
"max_forks_repo_head_hexsha": "f9fa0a46d838915de9c5f16c315c6c9eaba07f62",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "y18810919727/Control_Exp1001",
"max_forks_repo_path": "Control_Exp1001/demo/thickener_noise/test/test_vi_compare_sample.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f9fa0a46d838915de9c5f16c315c6c9eaba07f62",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "y18810919727/Control_Exp1001",
"max_issues_repo_path": "Control_Exp1001/demo/thickener_noise/test/test_vi_compare_sample.py",
"max_line_length": 64,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f9fa0a46d838915de9c5f16c315c6c9eaba07f62",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "y18810919727/Control_Exp1001",
"max_stars_repo_path": "Control_Exp1001/demo/thickener_noise/test/test_vi_compare_sample.py",
"max_stars_repo_stars_event_max_datetime": "2019-01-03T01:38:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-01-03T01:38:50.000Z",
"num_tokens": 75,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 326
} |
import .lub_prodSets
namespace xena -- hide
/-
# Chapter 3 : Sup and Inf
## Level 10
-/
-- main result in lemma sup_mem_prod_of_sets
-- hide
--def mem_prod_sets (A : set ℝ) (B : set ℝ) := { x : ℝ | ∃ y ∈ A, ∃ z ∈ B, x = y * z}
/-
Intermediary result `zero_in_prod` proved in sets_level08.
Intermediary result `mem_prod_sets_lub_proof` in previous level.
-/
/- Lemma
For two non-empty sets of reals $A$ and $B$, it is not in general true that
$$ \textrm{sup} (A \cdot B) = \textrm{sup} (A) \cdot \textrm{sup}(B)$$
where $A \cdot B$ is defined pointwise as above.
-/
-- The statement negates a universally quantified claim, so the proof
-- exhibits a concrete counterexample: A = [-2,-1], B = [0,3] with
-- sup A = -1 and sup B = 3, while sup(A·B) = 0 (mem_prod_sets_lub_proof),
-- so sup(A·B) ≠ sup A * sup B = -3.
lemma sup_mem_prod_of_sets : ¬ ( ∀ (A B : set ℝ) (a b : ℝ),
    A.nonempty ∧ B.nonempty → bdd_below A ∧ bdd_below B →
    is_lub A a ∧ is_lub B b →
    is_lub (mem_prod_sets A B) (a * b) ) :=
begin
  intro H,
  -- do an example with A = [-2,-1], B = [0,3]
  set A1 : set ℝ := set.Icc (-2:ℝ) (-1:ℝ) with hA,
  set B1 : set ℝ := set.Icc (0:ℝ) (3:ℝ) with hB,
  set a : ℝ := (-1:ℝ) with ha,
  set b : ℝ := (3 : ℝ) with hb,
  have G := H A1 B1,
  -- Both intervals are nonempty.
  have h1A : A1.nonempty, simp, norm_num,
  have h1B : B1.nonempty, simp, norm_num,
  have F := G a b (and.intro h1A h1B),
  have h11 : ((-2:ℝ) ≤ -1), norm_num,
  have h21 : (0:ℝ) ≤ (3:ℝ), norm_num,
  -- Both intervals are bounded below (witnessed by their left endpoints).
  have h2A : bdd_below A1,
  -- use the definition in bounds.lean
  have h12 := is_glb_Icc h11,
  cases h12 with hh hhh,
  existsi (-2:ℝ), exact hh,
  have h2B : bdd_below B1,
  have h22 := is_glb_Icc h21,
  cases h22 with hh hhh,
  existsi (0:ℝ), exact hh,
  have E := F (and.intro h2A h2B),
  -- Right endpoints are the least upper bounds.
  have h1 : is_lub A1 a,
  exact is_lub_Icc h11,
  have h2 : is_lub B1 b,
  exact is_lub_Icc h21,
  have D := E (and.intro h1 h2),
  rw ha at h1, rw hb at h2, rw ha at D, rw hb at D,
  -- But the product set's lub is 0, and lubs are unique: contradiction.
  have E : is_lub (mem_prod_sets A1 B1) 0,
  exact mem_prod_sets_lub_proof,
  have E1 := is_lub.unique D E,
  linarith, done
end
end xena -- hide
| {
"alphanum_fraction": null,
"author": "ImperialCollegeLondon",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/ImperialCollegeLondon-real-number-game/real-number-game-f9dcb7d9255a79b57e62038228a23346c2dc301b/src/game/sup_inf/supProdSets.lean",
"reason": null,
"repo": "real-number-game",
"save_path": "github-repos/lean/ImperialCollegeLondon-real-number-game",
"sha": "f9dcb7d9255a79b57e62038228a23346c2dc301b",
"size": null
} |
import torch
import numpy as np
from expreplay import ReplayMemory
from DQNModel import DQN
from evaluator import Evaluator
from tqdm import tqdm
class Trainer(object):
    def __init__(self,
                 env,
                 eval_env=None,
                 image_size=(45, 45, 45),
                 update_frequency=4,
                 replay_buffer_size=1e6,
                 init_memory_size=5e4,
                 max_episodes=100,
                 steps_per_episode=50,
                 eps=1,
                 min_eps=0.1,
                 delta=0.001,
                 batch_size=4,
                 gamma=0.9,
                 number_actions=6,
                 frame_history=4,
                 model_name="CommNet",
                 logger=None,
                 train_freq=1,
                 team_reward=False,
                 attention=False,
                 lr=1e-3,
                 scheduler_gamma=0.5,
                 scheduler_step_size=100
                 ):
        """Configure the multi-agent DQN trainer.

        Args:
            env: training environment; must expose ``agents`` (agent count)
                and ``files.num_files`` (used as the epoch length).
            eval_env: optional environment for validation epochs.
            image_size: per-agent observation crop size.
            update_frequency: controls target-network sync cadence in train().
            replay_buffer_size: capacity of the experience replay memory.
            init_memory_size: transitions to collect before training starts.
            max_episodes: total number of training episodes.
            steps_per_episode: hard step cap per episode.
            eps / min_eps / delta: epsilon-greedy start value, floor and
                per-episode decay.
            batch_size: minibatch size sampled from the replay buffer.
            gamma: reward discount factor.
            number_actions: size of the discrete action space.
            frame_history: number of stacked frames fed to the network.
            model_name: Q-network architecture name (e.g. "CommNet").
            logger: logging helper used throughout training.
            train_freq: environment steps between gradient updates.
            team_reward: enable collective rewards in the DQN wrapper.
            attention: enable attention in the model.
            lr / scheduler_gamma / scheduler_step_size: optimiser and
                learning-rate scheduler settings.
        """
        self.env = env
        self.eval_env = eval_env
        self.agents = env.agents
        self.image_size = image_size
        self.update_frequency = update_frequency
        self.replay_buffer_size = replay_buffer_size
        self.init_memory_size = init_memory_size
        self.max_episodes = max_episodes
        self.steps_per_episode = steps_per_episode
        self.eps = eps
        self.min_eps = min_eps
        self.delta = delta
        self.batch_size = batch_size
        self.gamma = gamma
        self.number_actions = number_actions
        self.frame_history = frame_history
        # One "epoch" = one pass over the environment's file list.
        self.epoch_length = self.env.files.num_files
        self.best_val_distance = float('inf')
        self.buffer = ReplayMemory(
            self.replay_buffer_size,
            self.image_size,
            self.frame_history,
            self.agents)
        self.dqn = DQN(
            self.agents,
            self.frame_history,
            logger=logger,
            type=model_name,
            collective_rewards=team_reward,
            attention=attention,
            lr=lr,
            scheduler_gamma=scheduler_gamma,
            scheduler_step_size=scheduler_step_size)
        # Put the online network in training mode (enables e.g. dropout).
        self.dqn.q_network.train(True)
        self.evaluator = Evaluator(eval_env,
                                   self.dqn.q_network,
                                   logger,
                                   self.agents,
                                   steps_per_episode)
        self.logger = logger
        self.train_freq = train_freq
def train(self):
self.logger.log(self.dqn.q_network)
self.init_memory()
episode = 1
acc_steps = 0
epoch_distances = []
while episode <= self.max_episodes:
# Reset the environment for the start of the episode.
obs = self.env.reset()
terminal = [False for _ in range(self.agents)]
losses = []
score = [0] * self.agents
for step_num in range(self.steps_per_episode):
acc_steps += 1
acts, q_values = self.get_next_actions(
self.buffer.recent_state())
# Step the agent once, and get the transition tuple
obs, reward, terminal, info = self.env.step(
np.copy(acts), q_values, terminal)
score = [sum(x) for x in zip(score, reward)]
self.buffer.append((obs, acts, reward, terminal))
if acc_steps % self.train_freq == 0:
mini_batch = self.buffer.sample(self.batch_size)
loss = self.dqn.train_q_network(mini_batch, self.gamma)
losses.append(loss)
if all(t for t in terminal):
break
epoch_distances.append([info['distError_' + str(i)]
for i in range(self.agents)])
self.append_episode_board(info, score, "train", episode)
if (episode * self.epoch_length) % self.update_frequency == 0:
self.dqn.copy_to_target_network()
self.eps = max(self.min_eps, self.eps - self.delta)
# Every epoch
if episode % self.epoch_length == 0:
self.append_epoch_board(epoch_distances, self.eps, losses,
"train", episode)
self.validation_epoch(episode)
self.dqn.save_model(name="latest_dqn.pt", forced=True)
self.dqn.scheduler.step()
epoch_distances = []
episode += 1
def init_memory(self):
self.logger.log("Initialising memory buffer...")
pbar = tqdm(desc="Memory buffer", total=self.init_memory_size)
while len(self.buffer) < self.init_memory_size:
# Reset the environment for the start of the episode.
obs = self.env.reset()
terminal = [False for _ in range(self.agents)]
steps = 0
for _ in range(self.steps_per_episode):
steps += 1
acts, q_values = self.get_next_actions(obs)
obs, reward, terminal, info = self.env.step(
acts, q_values, terminal)
self.buffer.append((obs, acts, reward, terminal))
if all(t for t in terminal):
break
pbar.update(steps)
pbar.close()
self.logger.log("Memory buffer filled")
def validation_epoch(self, episode):
if self.eval_env is None:
return
self.dqn.q_network.train(False)
epoch_distances = []
for k in range(self.eval_env.files.num_files):
self.logger.log(f"eval episode {k}")
(score, start_dists, q_values,
info) = self.evaluator.play_one_episode()
epoch_distances.append([info['distError_' + str(i)]
for i in range(self.agents)])
val_dists = self.append_epoch_board(epoch_distances, name="eval",
episode=episode)
if (val_dists < self.best_val_distance):
self.logger.log("Improved new best mean validation distances")
self.best_val_distance = val_dists
self.dqn.save_model(name="best_dqn.pt", forced=True)
self.dqn.q_network.train(True)
def append_episode_board(self, info, score, name="train", episode=0):
dists = {str(i):
info['distError_' + str(i)] for i in range(self.agents)}
self.logger.write_to_board(f"{name}/dist", dists, episode)
scores = {str(i): score[i] for i in range(self.agents)}
self.logger.write_to_board(f"{name}/score", scores, episode)
def append_epoch_board(self, epoch_dists, eps=0, losses=[],
name="train", episode=0):
epoch_dists = np.array(epoch_dists)
if name == "train":
lr = self.dqn.scheduler.state_dict()["_last_lr"]
self.logger.write_to_board(name, {"eps": eps, "lr": lr}, episode)
if len(losses) > 0:
loss_dict = {"loss": sum(losses) / len(losses)}
self.logger.write_to_board(name, loss_dict, episode)
for i in range(self.agents):
mean_dist = sum(epoch_dists[:, i]) / len(epoch_dists[:, i])
mean_dist_dict = {str(i): mean_dist}
self.logger.write_to_board(
f"{name}/mean_dist", mean_dist_dict, episode)
min_dist_dict = {str(i): min(epoch_dists[:, i])}
self.logger.write_to_board(
f"{name}/min_dist", min_dist_dict, episode)
max_dist_dict = {str(i): max(epoch_dists[:, i])}
self.logger.write_to_board(
f"{name}/max_dist", max_dist_dict, episode)
return np.array(list(mean_dist_dict.values())).mean()
def get_next_actions(self, obs_stack):
# epsilon-greedy policy
if np.random.random() < self.eps:
q_values = np.zeros((self.agents, self.number_actions))
actions = np.random.randint(self.number_actions, size=self.agents)
else:
actions, q_values = self.get_greedy_actions(
obs_stack, doubleLearning=True)
return actions, q_values
def get_greedy_actions(self, obs_stack, doubleLearning=True):
inputs = torch.tensor(obs_stack).unsqueeze(0)
if doubleLearning:
q_vals = self.dqn.q_network.forward(inputs).detach().squeeze(0)
else:
q_vals = self.dqn.target_network.forward(
inputs).detach().squeeze(0)
idx = torch.max(q_vals, -1)[1]
greedy_steps = np.array(idx, dtype=np.int32).flatten()
return greedy_steps, q_vals.data.numpy()
| {
"alphanum_fraction": 0.5506653019,
"author": null,
"avg_line_length": 42.0717703349,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "077d35d89c859037d35b38c8da1da89839de6bbc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9c707caf13e8054a49b34115d615d83f0b34ba5c",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "beeps82/rl-medical-1",
"max_forks_repo_path": "src/trainer.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9c707caf13e8054a49b34115d615d83f0b34ba5c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "beeps82/rl-medical-1",
"max_issues_repo_path": "src/trainer.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9c707caf13e8054a49b34115d615d83f0b34ba5c",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "beeps82/rl-medical-1",
"max_stars_repo_path": "src/trainer.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1816,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8793
} |
{-
Normalize Integer Matrices
-}
{-# OPTIONS --safe #-}
module Cubical.Experiments.IntegerMatrix where
open import Cubical.Foundations.Prelude
open import Cubical.Data.Nat
open import Cubical.Data.Int
open import Cubical.Data.FinData
open import Cubical.Data.List
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.CommRing.Instances.Int
renaming (ℤ to ℤRing)
open import Cubical.Algebra.Matrix
open import Cubical.Algebra.Matrix.CommRingCoefficient
open import Cubical.Algebra.IntegerMatrix.Smith
open import Cubical.Algebra.IntegerMatrix.Diagonalization
private
variable
m n : ℕ
open Coefficient ℤRing
-- Get divisors directly
open isSmithNormal
open Smith
open isDiagonal
open Diag
-- Elementary divisors of M, read off the Smith normal form.
getElemDiv : Mat m n → List ℤ
getElemDiv M = smith M .isnormal .divs .fst
-- Diagonal entries produced by the (weaker) diagonalization procedure.
getDiagDiv : Mat m n → List ℤ
getDiagDiv M = diagonalize M .isdiag .divs .fst
-- Constructing matrices
-- Build a 2×2 integer matrix from its entries, given in row-major
-- order: a00 a01 a10 a11.
makeMat2×2 : ℤ → ℤ → ℤ → ℤ → Mat 2 2
makeMat2×2 a00 _ _ _ zero zero = a00
makeMat2×2 _ a01 _ _ zero one  = a01
makeMat2×2 _ _ a10 _ one  zero = a10
makeMat2×2 _ _ _ a11 one  one  = a11
-- Build a 3×3 integer matrix from its entries, given in row-major
-- order: a00 a01 a02 / a10 a11 a12 / a20 a21 a22.
makeMat3×3 : ℤ → ℤ → ℤ → ℤ → ℤ → ℤ → ℤ → ℤ → ℤ → Mat 3 3
makeMat3×3 a00 _ _ _ _ _ _ _ _ zero zero = a00
makeMat3×3 _ a01 _ _ _ _ _ _ _ zero one  = a01
makeMat3×3 _ _ a02 _ _ _ _ _ _ zero two  = a02
makeMat3×3 _ _ _ a10 _ _ _ _ _ one  zero = a10
makeMat3×3 _ _ _ _ a11 _ _ _ _ one  one  = a11
makeMat3×3 _ _ _ _ _ a12 _ _ _ one  two  = a12
makeMat3×3 _ _ _ _ _ _ a20 _ _ two  zero = a20
makeMat3×3 _ _ _ _ _ _ _ a21 _ two  one  = a21
makeMat3×3 _ _ _ _ _ _ _ _ a22 two  two  = a22
-- The Tests
-- One can add flag "-vprofile.interactive:10" to this file,
-- then C-c C-n to run these tests and also get the time.
-- It turns out that, "smith" is much slower than "diagonalize"
-- and it doesn't work even for simple 3×3-matrices.
-- The "diagonalize" works only for very simple 3×3-matrices.
-- One subtle point: if one performs only a single step of Smith
-- normalization and simply adds up the time cost of each step,
-- the total is far less than running the whole function "smith".
-- So the recursion itself slows down the computation,
-- for reasons I do not fully understand.
-- Also, the performance of "smith" is very bad at certain trivial cases,
-- much worse than some non-trivial cases.
mat1 = makeMat2×2
1 0
0 1
-- Time: 528ms
test1 = getElemDiv mat1
-- Time: 51ms
test1' = getDiagDiv mat1
mat2 = makeMat2×2
2 0
0 1
-- Time: 89,437ms
-- Why so slow?
test2 = getElemDiv mat2
-- Time: 51ms
test2' = getDiagDiv mat2
mat3 = makeMat2×2
2 1
3 5
-- Time: 3,308ms
test3 = getElemDiv mat3
-- Time: 1,887ms
test3' = getDiagDiv mat3
mat4 = makeMat2×2
4 2
2 4
-- Time: 3,284ms
test4 = getElemDiv mat4
-- Time: 1,942ms
test4' = getDiagDiv mat4
mat5 = makeMat3×3
1 0 0
0 0 0
0 0 0
-- Time: 9,400ms
test5 = getElemDiv mat5
-- Time: 337ms
test5' = getDiagDiv mat5
mat6 = makeMat3×3
1 0 0
0 1 0
0 0 1
-- Time: ???
-- It doesn't work out already.
test6 = getElemDiv mat6
-- Time: 8,598ms
test6' = getDiagDiv mat6
mat7 = makeMat3×3
1 1 0
3 2 1
2 0 1
-- Time: ???
test7 = getElemDiv mat7
-- Time: 14,149ms
test7' = getDiagDiv mat7
mat8 = makeMat3×3
2 3 1
2 2 3
1 1 0
-- Time: ???
test8 = getElemDiv mat8
-- Time: ???
-- Not working either.
test8' = getDiagDiv mat8
| {
"alphanum_fraction": 0.6816960287,
"author": null,
"avg_line_length": 22.3266666667,
"converted": null,
"ext": "agda",
"file": null,
"hexsha": "2331b6e54529447e807efcc1cd97f23d2cd80ae7",
"include": null,
"lang": "Agda",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "58c0b83bb0fed0dc683f3d29b1709effe51c1689",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "thomas-lamiaux/cubical",
"max_forks_repo_path": "Cubical/Experiments/IntegerMatrix.agda",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "58c0b83bb0fed0dc683f3d29b1709effe51c1689",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "thomas-lamiaux/cubical",
"max_issues_repo_path": "Cubical/Experiments/IntegerMatrix.agda",
"max_line_length": 73,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "58c0b83bb0fed0dc683f3d29b1709effe51c1689",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "thomas-lamiaux/cubical",
"max_stars_repo_path": "Cubical/Experiments/IntegerMatrix.agda",
"max_stars_repo_stars_event_max_datetime": "2021-10-31T17:32:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-10-31T17:32:49.000Z",
"num_tokens": 1308,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3349
} |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# Demo script: draw two labelled 2-D Gaussian clusters with seaborn.
# set font size of labels on matplotlib plots
plt.rc('font', size=16)
# set style of plots
sns.set_style('white')
# define a custom palette (and preview it with palplot)
customPalette = ['#630C3A', '#39C8C6', '#D3500C', '#FFB139']
sns.set_palette(customPalette)
sns.palplot(customPalette)
# number of points per group
n = 50
# define group labels and their 2-D cluster centers
groups = {'A': (2,2),
          'B': (4,4)}
# create labeled x and y data (rows i*n .. (i+1)*n-1 belong to group i)
data = pd.DataFrame(index=range(n*len(groups)), columns=['x','y','label'])
for i, group in enumerate(groups.keys()):
    # randomly draw n 2-D points from a Gaussian centred on the group
    # (standard deviation 0.5 on each axis)
    data.loc[i*n:((i+1)*n)-1,['x','y']] = np.random.normal(groups[group],
                                                           [0.5,0.5],
                                                           [n,2])
    # add group labels
    data.loc[i*n:((i+1)*n)-1,['label']] = group
# print(data)
# plot data with seaborn; hue colours the points by group label,
# fit_reg=False suppresses the regression line
facet = sns.lmplot(data=data, x='x', y='y', hue='label',
                   fit_reg=False, legend=True, legend_out=True)
| {
"alphanum_fraction": 0.5899653979,
"author": null,
"avg_line_length": 28.9,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f4c7a9fbbe06603694dfddd2066d7da89eddef4f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "87f0f481926c40855223e2843bd728edb235c516",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "xing710/ModSimPy",
"max_forks_repo_path": "code/sublime_text/test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "87f0f481926c40855223e2843bd728edb235c516",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "xing710/ModSimPy",
"max_issues_repo_path": "code/sublime_text/test.py",
"max_line_length": 74,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "87f0f481926c40855223e2843bd728edb235c516",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "xing710/ModSimPy",
"max_stars_repo_path": "code/sublime_text/test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 306,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1156
} |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 15 16:42:48 2012
Show an animated sine function and measure frames per second (FPS)
@author: Muenker_2
"""
import numpy as np
import time
import matplotlib.pyplot as plt

plt.ion()                           # interactive mode on
tstart = time.time()                # for profiling
x = np.arange(0, 2 * np.pi, 0.01)   # create x-array
line, = plt.plot(x, np.sin(x))
# BUG FIX: np.arange(1, 200) yields 199 values, but the FPS was divided
# by a hard-coded 200 — count the frames actually drawn instead.
n_frames = 0
for i in np.arange(1, 200):
    line.set_ydata(np.sin(x + i / 10.0))  # update the data
    plt.draw()                            # redraw the canvas
    n_frames += 1
print('FPS:', n_frames / (time.time() - tstart))
"alphanum_fraction": 0.6137931034,
"author": null,
"avg_line_length": 25.2173913043,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "862ab532bcdbb115fd1abe101024385adf0f114d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c85ac95a10c09d7fa15d63b2bdb24acab89fec60",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "chipmuenk/acoustics",
"max_forks_repo_path": "code/LTI/Basics/running_sine.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c85ac95a10c09d7fa15d63b2bdb24acab89fec60",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "chipmuenk/acoustics",
"max_issues_repo_path": "code/LTI/Basics/running_sine.py",
"max_line_length": 66,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c85ac95a10c09d7fa15d63b2bdb24acab89fec60",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "chipmuenk/acoustics",
"max_stars_repo_path": "code/LTI/Basics/running_sine.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 175,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 580
} |
from PIL import Image
import numpy as np
# BUG FIX: scipy.ndimage.morphology is deprecated (removed in SciPy 2.x);
# binary_erosion lives directly in scipy.ndimage.
from scipy.ndimage import binary_erosion
import requests

image_url = "https://user-images.githubusercontent.com/1842985/94539467-8c03f280-0245-11eb-82d6-a938405b48fe.JPG"

# Download the source image to disk.
with requests.get(image_url) as r:
    with open("input_image.jpg", "wb") as f:
        f.write(r.content)

size = (1500, 1000)
# Resize image to not run out of memory
Image.open("input_image.jpg").resize(size, Image.BOX).save("input_image.png")
u2net_alpha = Image.open("u2net_prediction.png").convert("L").resize(size, Image.BOX)
# convert to numpy array; mode "L" gives uint8 values in range [0, 255]
u2net_alpha = np.array(u2net_alpha)
# guess likely foreground/background from the alpha prediction
is_foreground = u2net_alpha > 240
is_background = u2net_alpha < 10
# Erode foreground/background to widen the "unknown" band of the trimap.
# (Renamed from `size`, which shadowed the image size above.)
erosion_size = 31
# BUG FIX: np.int was removed in NumPy 1.24 (AttributeError); the builtin
# int is the documented replacement.
structure = np.ones((erosion_size, erosion_size), dtype=int)
is_foreground = binary_erosion(is_foreground, structure=structure)
is_background = binary_erosion(is_background, structure=structure, border_value=1)
# build trimap
# 0 = background
# 128 = unknown
# 255 = foreground
trimap = np.full(u2net_alpha.shape, dtype=np.uint8, fill_value=128)
trimap[is_foreground] = 255
trimap[is_background] = 0
Image.fromarray(trimap).save("trimap.png")

# Alpha-mat the image using the trimap and save the cutout.
from pymatting import cutout
cutout("input_image.png", "trimap.png", "cutout.png")
"alphanum_fraction": 0.7617217525,
"author": null,
"avg_line_length": 29.5681818182,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3554a132a5635e0df85d168e5a65ede4a8392b40",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d5405e38491af87b62058c3e9c0dbb9bc46fa866",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "trarynight/u2net",
"max_forks_repo_path": "gen_result.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d5405e38491af87b62058c3e9c0dbb9bc46fa866",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "trarynight/u2net",
"max_issues_repo_path": "gen_result.py",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d5405e38491af87b62058c3e9c0dbb9bc46fa866",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "trarynight/u2net",
"max_stars_repo_path": "gen_result.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 378,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1301
} |
import numpy as np
import pandas as pd
def interpolate_dates(df, date_col=None):
    """Reindex *df* onto a continuous daily calendar and linearly
    interpolate the gaps.

    Zeros are treated as missing before interpolation; any values that
    remain missing afterwards (e.g. leading gaps) are filled with 0.
    The first column is used as the date column when *date_col* is None.
    """
    frame = df.copy()
    if date_col is None:
        date_col = frame.columns[0]
    frame[date_col] = pd.to_datetime(frame[date_col])
    # Full daily calendar spanning the observed date range.
    calendar = pd.DataFrame({
        f'{date_col}': pd.date_range(start=frame[date_col].min(),
                                     end=frame[date_col].max(),
                                     freq='1D'),
    })
    # Left-join so missing days appear as NaN rows, then interpolate
    # linearly over the datetime index.
    daily = calendar.merge(frame, how='left', on=date_col).set_index(date_col)
    daily = daily.replace({0: np.nan})
    interpolated = daily.interpolate(method='linear').reset_index()
    interpolated = interpolated.rename({'index': date_col}, axis=1)
    return interpolated.fillna(0)
"alphanum_fraction": 0.6846473029,
"author": null,
"avg_line_length": 31.4347826087,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "45e1dbe178d9c35eae593679443899537f65af08",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a02cb87841395f30911242a019f28f6ac15f27ec",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hammer-mt/hommmer",
"max_forks_repo_path": "src/hommmer/cleaners/interpolate_dates.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a02cb87841395f30911242a019f28f6ac15f27ec",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hammer-mt/hommmer",
"max_issues_repo_path": "src/hommmer/cleaners/interpolate_dates.py",
"max_line_length": 87,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "a02cb87841395f30911242a019f28f6ac15f27ec",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hammer-mt/hommmer",
"max_stars_repo_path": "src/hommmer/cleaners/interpolate_dates.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-23T00:38:20.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-09T21:27:30.000Z",
"num_tokens": 185,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 723
} |
/*
* Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
*/
#include "base/os.h"
#include <boost/assign/list_of.hpp>
#include <base/logging.h>
#include <io/event_manager.h>
#include <tbb/task.h>
#include <base/task.h>
#include <cmn/agent_cmn.h>
#include <cfg/cfg_init.h>
#include "oper/operdb_init.h"
#include "controller/controller_init.h"
#include "pkt/pkt_init.h"
#include "services/services_init.h"
#include "vrouter/ksync/ksync_init.h"
#include "oper/interface_common.h"
#include "oper/nexthop.h"
#include "oper/tunnel_nh.h"
#include "route/route.h"
#include "oper/vrf.h"
#include "oper/mpls.h"
#include "oper/vm.h"
#include "oper/vn.h"
#include "filter/acl.h"
#include "oper/path_preference.h"
#include "test_cmn_util.h"
#include "kstate/test/test_kstate_util.h"
#include "vr_types.h"
#include "net/bgp_af.h"
#include <controller/controller_export.h>
using namespace boost::assign;
std::string eth_itf;
// Callback invoked by the agent when its router-id changes; this test
// binary has no router-id-dependent state, so it is intentionally a no-op.
void RouterIdDepInit(Agent *agent) {
}
// Sandesh response callback registered before the route-list requests
// issued by the tests below; currently a stub.
static void ValidateSandeshResponse(Sandesh *sandesh, vector<int> &result) {
    //TBD
    //Validate the response by the expectation
}
class RouteTest : public ::testing::Test {
public:
void NhListener(DBTablePartBase *partition, DBEntryBase *dbe) {
return;
}
void RtListener(DBTablePartBase *partition, DBEntryBase *dbe) {
return;
}
static void SetTunnelType(TunnelType::Type type) {
TunnelType::SetDefaultType(type);
type_ = type;
}
static TunnelType::Type GetTunnelType() {
return type_;
}
protected:
RouteTest() : vrf_name_("vrf1"), eth_name_(eth_itf) {
default_dest_ip_ = Ip4Address::from_string("0.0.0.0");
if (Agent::GetInstance()->router_id_configured()) {
vhost_ip_ = Agent::GetInstance()->router_id();
} else {
vhost_ip_ = Ip4Address::from_string("10.1.1.10");
}
if (Agent::GetInstance()->vhost_default_gateway() != default_dest_ip_) {
is_gateway_configured = true;
fabric_gw_ip_ = Agent::GetInstance()->vhost_default_gateway();
} else {
is_gateway_configured = false;
fabric_gw_ip_ = Ip4Address::from_string("10.1.1.254");
}
foreign_gw_ip_ = Ip4Address::from_string("10.10.10.254");
server1_ip_ = Ip4Address::from_string("10.1.1.11");
server2_ip_ = Ip4Address::from_string("10.1.122.11");
local_vm_ip_ = Ip4Address::from_string("1.1.1.10");
subnet_vm_ip_1_ = Ip4Address::from_string("1.1.1.0");
subnet_vm_ip_2_ = Ip4Address::from_string("2.2.2.96");
subnet_vm_ip_3_ = Ip4Address::from_string("3.3.0.0");
remote_vm_ip_ = Ip4Address::from_string("1.1.1.11");
remote_subnet_ip_ = Ip4Address::from_string("1.1.1.9");
trap_ip_ = Ip4Address::from_string("1.1.1.100");
lpm1_ip_ = Ip4Address::from_string("2.0.0.0");
lpm2_ip_ = Ip4Address::from_string("2.1.0.0");
lpm3_ip_ = Ip4Address::from_string("2.1.1.0");
lpm4_ip_ = Ip4Address::from_string("2.1.1.1");
lpm5_ip_ = Ip4Address::from_string("2.1.1.2");
}
virtual void SetUp() {
client->Reset();
//Create a VRF
VrfAddReq(vrf_name_.c_str());
PhysicalInterface::CreateReq(Agent::GetInstance()->interface_table(),
eth_name_,
Agent::GetInstance()->fabric_vrf_name(),
PhysicalInterface::FABRIC,
PhysicalInterface::ETHERNET, false, nil_uuid(),
Ip4Address(0), Interface::TRANSPORT_ETHERNET);
AddResolveRoute(server1_ip_, 24);
client->WaitForIdle();
agent_ = Agent::GetInstance();
}
virtual void TearDown() {
VrfDelReq(vrf_name_.c_str());
client->WaitForIdle();
TestRouteTable table1(1);
WAIT_FOR(100, 1000, (table1.Size() == 0));
EXPECT_EQ(table1.Size(), 0U);
TestRouteTable table2(2);
WAIT_FOR(100, 1000, (table2.Size() == 0));
EXPECT_EQ(table2.Size(), 0U);
TestRouteTable table3(3);
WAIT_FOR(100, 1000, (table3.Size() == 0));
EXPECT_EQ(table3.Size(), 0U);
WAIT_FOR(100, 1000, (VrfFind(vrf_name_.c_str()) != true));
WAIT_FOR(1000, 1000, agent_->vrf_table()->Size() == 1);
}
void AddHostRoute(Ip4Address addr) {
Agent::GetInstance()->fabric_inet4_unicast_table()->AddHostRoute(
vrf_name_, addr, 32, Agent::GetInstance()->fabric_vn_name(),
false);
client->WaitForIdle();
}
void AddVhostRoute() {
Agent::GetInstance()->fabric_inet4_unicast_table()->AddVHostRecvRouteReq(
Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(),
"vhost0", vhost_ip_, 32, "", false);
client->WaitForIdle();
}
void AddRemoteVmRoute(const Ip4Address &remote_vm_ip,
const Ip4Address &server_ip, uint32_t plen,
uint32_t label, TunnelType::TypeBmap bmap) {
//Passing vn name as vrf name itself
Inet4TunnelRouteAdd(NULL, vrf_name_, remote_vm_ip, plen, server_ip,
bmap, label, vrf_name_,
SecurityGroupList(), PathPreference());
client->WaitForIdle();
}
void AddRemoteVmRoute(const Ip4Address &remote_vm_ip,
const Ip4Address &server_ip, uint32_t plen,
uint32_t label) {
AddRemoteVmRoute(remote_vm_ip, server_ip, plen, label,
TunnelType::AllType());
}
void AddResolveRoute(const Ip4Address &server_ip, uint32_t plen) {
InetInterfaceKey vhost_intf_key(
Agent::GetInstance()->vhost_interface()->name());
Agent::GetInstance()->fabric_inet4_unicast_table()->AddResolveRoute(
Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(), server_ip, plen,
vhost_intf_key, 0, false, "", SecurityGroupList());
client->WaitForIdle();
}
void AddGatewayRoute(const std::string &vrf_name,
const Ip4Address &ip, int plen,
const Ip4Address &server) {
Agent::GetInstance()->fabric_inet4_unicast_table()->AddGatewayRouteReq
(Agent::GetInstance()->local_peer(),
vrf_name, ip, plen, server, "", MplsTable::kInvalidLabel,
SecurityGroupList(), CommunityList());
client->WaitForIdle();
}
void AddVlanNHRoute(const std::string &vrf_name, const std::string &ip,
uint16_t plen, int id, uint16_t tag,
uint16_t label, const std::string &vn_name) {
SecurityGroupList sg_l;
VnListType vn_list;
vn_list.insert(vn_name);
Agent::GetInstance()->fabric_inet4_unicast_table()->
AddVlanNHRouteReq(NULL, vrf_name_, Ip4Address::from_string(ip), plen,
MakeUuid(id), tag, label, vn_list, sg_l,
PathPreference());
client->WaitForIdle();
}
void DeleteRoute(const Peer *peer, const std::string &vrf_name,
const Ip4Address &addr, uint32_t plen) {
AgentRoute *rt = RouteGet(vrf_name, addr, plen);
uint32_t path_count = rt->GetPathList().size();
Agent::GetInstance()->fabric_inet4_unicast_table()->DeleteReq(peer, vrf_name,
addr, plen, NULL);
client->WaitForIdle();
WAIT_FOR(1000, 10000, ((RouteFind(vrf_name, addr, plen) != true) ||
(rt->GetPathList().size() == (path_count - 1))));
}
bool IsSameNH(const Ip4Address &ip1, uint32_t plen1, const Ip4Address &ip2,
uint32_t plen2, const string vrf_name) {
InetUnicastRouteEntry *rt1 = RouteGet(vrf_name, ip1, plen1);
const NextHop *nh1 = rt1->GetActiveNextHop();
InetUnicastRouteEntry *rt2 = RouteGet(vrf_name, ip1, plen1);
const NextHop *nh2 = rt2->GetActiveNextHop();
return (nh1 == nh2);
}
std::string vrf_name_;
std::string eth_name_;
Ip4Address default_dest_ip_;
Ip4Address local_vm_ip_;
Ip4Address subnet_vm_ip_1_;
Ip4Address subnet_vm_ip_2_;
Ip4Address subnet_vm_ip_3_;
Ip4Address remote_vm_ip_;
Ip4Address remote_subnet_ip_;
Ip4Address vhost_ip_;
Ip4Address fabric_gw_ip_;
Ip4Address foreign_gw_ip_;
Ip4Address trap_ip_;
Ip4Address server1_ip_;
Ip4Address server2_ip_;
Ip4Address lpm1_ip_;
Ip4Address lpm2_ip_;
Ip4Address lpm3_ip_;
Ip4Address lpm4_ip_;
Ip4Address lpm5_ip_;
bool is_gateway_configured;
Agent *agent_;
static TunnelType::Type type_;
};
// Out-of-line definition of the fixture's static tunnel-type member.
TunnelType::Type RouteTest::type_;
// Minimal DBState subclass used to attach per-entry client state to DB
// entries in tests.
class TestRtState : public DBState {
public:
    TestRtState() : DBState(), dummy_(0) { };
    int dummy_;  // placeholder payload
};
// Validate that routes db-tables have 1 partition only
TEST_F(RouteTest, PartitionCount_1) {
    string vrf_name = agent_->fabric_vrf_name();
    VrfEntry *vrf = agent_->vrf_table()->FindVrfFromName(vrf_name);
    EXPECT_TRUE(vrf != NULL);
    // Every route-table type of the fabric VRF must use exactly one
    // DB partition.
    for (int i = Agent::INVALID + 1; i < Agent::ROUTE_TABLE_MAX; i++) {
        EXPECT_EQ(1, vrf->GetRouteTable(i)->PartitionCount());
    }
}
TEST_F(RouteTest, HostRoute_1) {
    //Host Route - Used to trap packets to agent
    //Add and delete host route
    AddHostRoute(trap_ip_);
    EXPECT_TRUE(RouteFind(vrf_name_, trap_ip_, 32));
    // The route must disappear once the local-peer path is deleted.
    DeleteRoute(Agent::GetInstance()->local_peer(), vrf_name_, trap_ip_, 32);
    EXPECT_FALSE(RouteFind(vrf_name_, trap_ip_, 32));
}
// IPAM subnets must materialize as discard routes with RPF disabled,
// and stay that way when an EVPN composite NH is added.
TEST_F(RouteTest, SubnetRoute_1) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
        {"2.2.2.100", 28, "2.2.2.200", true},
        {"3.3.3.0", 16, "3.3.30.200", true},
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 3);
    client->WaitForIdle();
    // One subnet route per IPAM entry is expected in vrf1.
    InetUnicastRouteEntry *rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    InetUnicastRouteEntry *rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    InetUnicastRouteEntry *rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt1 != NULL);
    EXPECT_TRUE(rt2 != NULL);
    EXPECT_TRUE(rt3 != NULL);
    EXPECT_TRUE(rt1->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt2->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt3->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt1->ipam_subnet_route() == true);
    EXPECT_TRUE(rt2->ipam_subnet_route() == true);
    EXPECT_TRUE(rt3->ipam_subnet_route() == true);
    EXPECT_TRUE(rt1->IsRPFInvalid());
    EXPECT_TRUE(rt2->IsRPFInvalid());
    EXPECT_TRUE(rt3->IsRPFInvalid());
    EXPECT_TRUE(rt1->dest_vn_name() == "vn1");
    EXPECT_TRUE(rt2->dest_vn_name() == "vn1");
    EXPECT_TRUE(rt3->dest_vn_name() == "vn1");
    BgpPeer *peer = CreateBgpPeer("127.0.0.1", "remote");
    FillEvpnNextHop(peer, "vrf1", 1000, TunnelType::MplsType());
    client->WaitForIdle();
    //Addition of evpn composite NH should not change subnet route
    EXPECT_TRUE(rt1->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt2->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt3->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt1->ipam_subnet_route() == true);
    EXPECT_TRUE(rt2->ipam_subnet_route() == true);
    EXPECT_TRUE(rt3->ipam_subnet_route() == true);
    EXPECT_TRUE(rt1->IsRPFInvalid());
    EXPECT_TRUE(rt2->IsRPFInvalid());
    EXPECT_TRUE(rt3->IsRPFInvalid());
    //Call for sandesh
    Inet4UcRouteReq *uc_list_req = new Inet4UcRouteReq();
    std::vector<int> result = list_of(1);
    Sandesh::set_response_callback(boost::bind(ValidateSandeshResponse, _1, result));
    uc_list_req->set_vrf_index(1);
    uc_list_req->HandleRequest();
    client->WaitForIdle();
    uc_list_req->Release();
    client->WaitForIdle();
    // Cleanup: IPAM, EVPN NH, ports and the BGP peer.
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    FlushEvpnNextHop(peer, "vrf1", 0);
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
    DeleteBgpPeer(peer);
    client->WaitForIdle();
}
/* Change IPAM list and verify clear/add of subnet route */
TEST_F(RouteTest, SubnetRoute_2) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    // Three successive IPAM configurations: full list, then shrunk to
    // one subnet, then swapped to a different single subnet.
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
        {"2.2.2.100", 28, "2.2.2.200", true},
        {"3.3.3.0", 16, "3.3.30.200", true},
    };
    IpamInfo ipam_info_2[] = {
        {"2.2.2.100", 28, "2.2.2.200", true},
    };
    IpamInfo ipam_info_3[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 3);
    client->WaitForIdle();
    InetUnicastRouteEntry *rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    InetUnicastRouteEntry *rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    InetUnicastRouteEntry *rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    // NOTE(review): this early return silently passes the test when no
    // subnet route was created at all — confirm the guard is intended.
    if((rt1 == NULL) && (rt2 == NULL) && (rt3 == NULL))
        return;
    EXPECT_TRUE(rt1 != NULL);
    EXPECT_TRUE(rt2 != NULL);
    EXPECT_TRUE(rt3 != NULL);
    EXPECT_TRUE(rt1->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt2->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt3->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt1->IsRPFInvalid());
    EXPECT_TRUE(rt2->IsRPFInvalid());
    EXPECT_TRUE(rt3->IsRPFInvalid());
    BgpPeer *peer = CreateBgpPeer("127.0.0.1", "remote");
    FillEvpnNextHop(peer, "vrf1", 1000, TunnelType::MplsType());
    client->WaitForIdle();
    // EVPN composite NH must not disturb the subnet routes.
    EXPECT_TRUE(rt1->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt2->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt3->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt1->IsRPFInvalid());
    EXPECT_TRUE(rt2->IsRPFInvalid());
    EXPECT_TRUE(rt3->IsRPFInvalid());
    FlushEvpnNextHop(peer, "vrf1", 0);
    // Shrink IPAM to one subnet: the other two routes must be withdrawn.
    AddIPAM("vn1", ipam_info_2, 1);
    client->WaitForIdle();
    rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt1 == NULL);
    EXPECT_TRUE(rt2 != NULL);
    EXPECT_TRUE(rt3 == NULL);
    EXPECT_TRUE(rt2->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt2->IsRPFInvalid());
    FillEvpnNextHop(peer, "vrf1", 1000, TunnelType::MplsType());
    EXPECT_TRUE(rt2->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(rt2->ipam_subnet_route());
    EXPECT_TRUE(rt2->IsRPFInvalid());
    // Swap to a different single subnet.
    AddIPAM("vn1", ipam_info_3, 1);
    FlushEvpnNextHop(peer, "vrf1", 0);
    client->WaitForIdle();
    //Just check for sandesh message handling
    Inet4UcRouteReq *uc_list_req = new Inet4UcRouteReq();
    std::vector<int> result = list_of(1);
    Sandesh::set_response_callback(boost::bind(ValidateSandeshResponse, _1, result));
    uc_list_req->set_vrf_index(1);
    uc_list_req->HandleRequest();
    client->WaitForIdle();
    uc_list_req->Release();
    client->WaitForIdle();
    rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt1 != NULL);
    EXPECT_TRUE(rt2 == NULL);
    EXPECT_TRUE(rt3 == NULL);
    EXPECT_TRUE(rt1->GetActiveNextHop()->GetType() == NextHop::DISCARD);
    // Deleting the IPAM entirely withdraws all subnet routes.
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt1 == NULL);
    EXPECT_TRUE(rt2 == NULL);
    EXPECT_TRUE(rt3 == NULL);
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
    DeleteBgpPeer(peer);
    client->WaitForIdle();
}
TEST_F(RouteTest, VhostRecvRoute_1) {
    // Verify that a receive route for the IP configured on the vhost
    // interface can be added to, and then removed from, the fabric VRF.
    Agent *agent = Agent::GetInstance();

    AddVhostRoute();
    EXPECT_TRUE(RouteFind(agent->fabric_vrf_name(), vhost_ip_, 32));

    DeleteRoute(agent->local_peer(), agent->fabric_vrf_name(), vhost_ip_, 32);
    EXPECT_FALSE(RouteFind(agent->fabric_vrf_name(), vhost_ip_, 32));
}
// Create one VM port and verify its /32 inet route carries the expected
// VN name, VxLAN id and tunnel bitmap; verify the route is removed when
// the port is deleted.
TEST_F(RouteTest, LocalVmRoute_1) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };

    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    EXPECT_TRUE(VmPortActive(input, 0));
    EXPECT_TRUE(RouteFind(vrf_name_, local_vm_ip_, 32));

    InetUnicastRouteEntry *rt = RouteGet(vrf_name_, local_vm_ip_, 32);
    EXPECT_TRUE(rt->dest_vn_name() == "vn1");
    // A local VM path carries no VxLAN id and is reachable via MPLS tunnels.
    EXPECT_TRUE(rt->GetActivePath()->vxlan_id() == VxLanTable::kInvalidvxlan_id);
    EXPECT_TRUE(rt->GetActivePath()->tunnel_bmap() == TunnelType::MplsType());
    EXPECT_FALSE(rt->ipam_subnet_route());

    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    // The original manual poll loop never asserted that the route was
    // actually gone; WAIT_FOR (used elsewhere in this file) both waits
    // and flags a failure on timeout.
    WAIT_FOR(100, 100, (RouteFind(vrf_name_, local_vm_ip_, 32) == false));
    EXPECT_FALSE(VmPortFind(input, 0));
}
TEST_F(RouteTest, RemoteVmRoute_1) {
    // Add a remote VM /32 route via the fabric gateway, check the active
    // path attributes, then delete it again.
    AddRemoteVmRoute(remote_vm_ip_, fabric_gw_ip_, 32, MplsTable::kStartLabel);
    EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));

    InetUnicastRouteEntry *route = RouteGet(vrf_name_, remote_vm_ip_, 32);
    EXPECT_TRUE(route->dest_vn_name() == vrf_name_);
    EXPECT_TRUE(route->GetActiveLabel() == MplsTable::kStartLabel);
    EXPECT_TRUE(route->GetActiveNextHop()->GetType() == NextHop::TUNNEL);
    EXPECT_FALSE(route->ipam_subnet_route());

    DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
    EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
}
// Remote VM route over an unresolved server: the tunnel NH starts out
// invalid and becomes valid once ARP for the server resolves.
TEST_F(RouteTest, RemoteVmRoute_2) {
//Add remote VM route, make it point to a server
//whose ARP is not resolved, since there is no resolve
//route, tunnel NH will be marked invalid.
AddRemoteVmRoute(remote_vm_ip_, server1_ip_, 32, MplsTable::kStartLabel);
EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));
InetUnicastRouteEntry *addr_rt = RouteGet(vrf_name_, remote_vm_ip_, 32);
const NextHop *addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == false);
//Add ARP for server IP address
//Once Arp address is added, remote VM tunnel nexthop
//would be reevaluated, and tunnel nexthop would be valid
AddArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
// NOTE: addr_nh is the NH captured before ARP resolution; the check below
// relies on the same NH object being revalidated in place.
EXPECT_TRUE(addr_nh->IsValid() == true);
//Delete Remote VM route
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
//Delete ARP route
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(),
server1_ip_, 32);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server1_ip_, 32));
DelArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
}
// Overlapping remote VM routes: a /32 and a covering /24 for the same
// destination; deleting the /32 should leave traffic on the /24 path.
TEST_F(RouteTest, RemoteVmRoute_3) {
//Add a remote VM route with prefix len 32
AddRemoteVmRoute(remote_vm_ip_, server1_ip_, 32, MplsTable::kStartLabel);
//Add a remote VM route with prefix len 24
AddRemoteVmRoute(remote_vm_ip_, server1_ip_, 24, MplsTable::kStartLabel+1);
//Delete more specific(/32) route, and verify in kernel
//that specific route points to nexthop of /24 route
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
//Cleanup /24 route also
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 24);
}
// Remote VM route over a server covered by a resolve route: the tunnel NH
// triggers ARP resolution, tracks ARP add/change/delete, and the test
// counts NH notifications to verify no-op changes do not re-notify.
TEST_F(RouteTest, RemoteVmRoute_4) {
//Add resolve route
AddResolveRoute(server1_ip_, 24);
//Add a remote VM route pointing to server in same
//subnet, tunnel NH will trigger ARP resolution
AddRemoteVmRoute(remote_vm_ip_, server1_ip_, 32, MplsTable::kStartLabel);
EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));
InetUnicastRouteEntry *addr_rt = RouteGet(vrf_name_, remote_vm_ip_, 32);
const NextHop *addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == false);
EXPECT_TRUE(addr_nh->GetType() == NextHop::TUNNEL);
if (addr_nh->GetType() == NextHop::TUNNEL) {
const TunnelNH *tun = static_cast<const TunnelNH *>(addr_nh);
TunnelType t(RouteTest::GetTunnelType());
EXPECT_TRUE(tun->GetTunnelType().Compare(t));
}
client->Reset();
//Add ARP for server IP address
//Once Arp address is added, remote VM tunnel nexthop
//would be reevaluated, and tunnel nexthop would be valid
AddArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(addr_nh->IsValid() == true);
client->NHWait(2);
//Trigger change of route. verify tunnel NH is also notified
AddArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0e", eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(addr_nh->IsValid() == true);
client->NHWait(4);
//No-op change, verify tunnel NH is not notified
AddArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0e", eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(addr_nh->IsValid() == true);
client->NHWait(4);
// Notification count must stay at 4: the repeated identical ARP add
// above must not generate a fresh NH notification.
EXPECT_TRUE(client->nh_notify_ == 4);
//Delete the ARP NH and make sure tunnel NH is marked
//invalid
DelArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0e", eth_name_.c_str());
client->WaitForIdle();
client->NHWait(6);
EXPECT_TRUE(addr_nh->IsValid() == false);
//Readd ARP and verify tunnel NH is changed
AddArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0e", eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(addr_nh->IsValid() == true);
//Delete Remote VM route
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
//Delete ARP route
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(),
server1_ip_, 32);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server1_ip_, 32));
//Delete Resolve route
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(),
server1_ip_, 24);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server1_ip_, 24));
DelArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
}
// Remote VM route reached via the fabric default gateway: NH validity must
// follow ARP resolution of the gateway. The test temporarily installs a
// default gateway if the fixture was not configured with one, and restores
// the original state on exit.
TEST_F(RouteTest, RemoteVmRoute_5) {
if (!is_gateway_configured) {
Agent::GetInstance()->set_vhost_default_gateway(fabric_gw_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, fabric_gw_ip_);
client->WaitForIdle();
}
//Add remote VM route IP, pointing to 0.0.0.0
AddRemoteVmRoute(remote_vm_ip_, server2_ip_, 32, MplsTable::kStartLabel);
EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));
InetUnicastRouteEntry *addr_rt = RouteGet(vrf_name_, remote_vm_ip_, 32);
const NextHop *addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == false);
//Resolve ARP for gw
AddArp(fabric_gw_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f",
eth_name_.c_str());
client->WaitForIdle();
// Re-fetch the active NH after the ARP event before checking validity.
addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == true);
//Delete ARP for gw
DelArp(fabric_gw_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f",
eth_name_.c_str());
client->WaitForIdle();
addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == false);
//Delete remote server route
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
if (!is_gateway_configured) {
Agent::GetInstance()->set_vhost_default_gateway(default_dest_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, default_dest_ip_);
client->WaitForIdle();
}
}
// Remote VM route when no default gateway is configured: with the gateway
// set to 0.0.0.0 the tunnel resolves over a DISCARD NH, and configuring a
// real gateway afterwards flips NH validity with ARP state.
TEST_F(RouteTest, RemoteVmRoute_no_gw) {
if (is_gateway_configured) {
Agent::GetInstance()->set_vhost_default_gateway(default_dest_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, default_dest_ip_);
client->WaitForIdle();
}
//Add remote VM route IP, pointing to 0.0.0.0
AddRemoteVmRoute(remote_vm_ip_, server2_ip_, 32, MplsTable::kStartLabel);
EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));
InetUnicastRouteEntry *addr_rt = RouteGet(vrf_name_, remote_vm_ip_, 32);
const NextHop *addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == true);
EXPECT_TRUE(addr_nh->GetType() == NextHop::TUNNEL);
if (addr_nh->GetType() == NextHop::TUNNEL) {
const TunnelNH *tun = static_cast<const TunnelNH *>(addr_nh);
EXPECT_TRUE(tun->GetRt()->GetActiveNextHop()->GetType() == NextHop::DISCARD);
Agent::GetInstance()->set_vhost_default_gateway(fabric_gw_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, fabric_gw_ip_);
client->WaitForIdle();
// NOTE(review): addr_nh captured before the gateway change is reused
// below; presumably the same NH object is revalidated in place rather
// than replaced — confirm against the NH table behavior.
//addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == false);
//Resolve ARP for gw
AddArp(fabric_gw_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f",
eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(addr_nh->IsValid() == true);
//Delete ARP for gw
DelArp(fabric_gw_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f",
eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(addr_nh->IsValid() == false);
Agent::GetInstance()->set_vhost_default_gateway(default_dest_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, default_dest_ip_);
client->WaitForIdle();
}
//Delete remote server route
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
if (is_gateway_configured) {
Agent::GetInstance()->set_vhost_default_gateway(fabric_gw_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, fabric_gw_ip_);
client->WaitForIdle();
}
}
// Remote VM route when the default gateway is a "foreign" address: the
// tunnel NH stays valid but its underlying route resolves to DISCARD.
// Restores the fixture's gateway configuration on exit.
TEST_F(RouteTest, RemoteVmRoute_foreign_gw) {
Agent::GetInstance()->set_vhost_default_gateway(foreign_gw_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, foreign_gw_ip_);
client->WaitForIdle();
//Add remote VM route IP, pointing to 0.0.0.0
AddRemoteVmRoute(remote_vm_ip_, server2_ip_, 32, MplsTable::kStartLabel);
EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));
InetUnicastRouteEntry *addr_rt = RouteGet(vrf_name_, remote_vm_ip_, 32);
const NextHop *addr_nh = addr_rt->GetActiveNextHop();
EXPECT_TRUE(addr_nh->IsValid() == true);
EXPECT_TRUE(addr_nh->GetType() == NextHop::TUNNEL);
if (addr_nh->GetType() == NextHop::TUNNEL) {
const TunnelNH *tun = static_cast<const TunnelNH *>(addr_nh);
EXPECT_TRUE(tun->GetRt()->GetActiveNextHop()->GetType() == NextHop::DISCARD);
}
//Delete remote server route
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
// Restore whichever gateway configuration the fixture started with.
if (is_gateway_configured) {
Agent::GetInstance()->set_vhost_default_gateway(fabric_gw_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, fabric_gw_ip_);
client->WaitForIdle();
} else {
Agent::GetInstance()->set_vhost_default_gateway(default_dest_ip_);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(),
default_dest_ip_, 0, default_dest_ip_);
client->WaitForIdle();
}
}
TEST_F(RouteTest, GatewayRoute_1) {
//Addition and deletion of gateway route.
//We add a gateway route as below
//server2_ip ----->GW---->ARP NH
//Server2 route and GW route, should have same NH
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(), server2_ip_, 32,
fabric_gw_ip_);
EXPECT_TRUE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server2_ip_, 32));
//Resolve ARP for subnet gateway route
AddArp(fabric_gw_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f",
eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(IsSameNH(server2_ip_, 32, fabric_gw_ip_, 32,
Agent::GetInstance()->fabric_vrf_name()));
//Change mac, and verify that nexthop of gateway route
//also get updated
AddArp(fabric_gw_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0e",
eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(IsSameNH(server2_ip_, 32, fabric_gw_ip_, 32,
Agent::GetInstance()->fabric_vrf_name()));
//Delete indirect route
DeleteRoute(Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(), server2_ip_, 32);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server2_ip_, 32));
//Delete ARP route, since no covering resolve route
//is present server2 route would point to discard NH
// NOTE(review): this DelArp passes std::string arguments, unlike the
// .c_str() form used everywhere else — presumably an overload exists;
// confirm, and consider normalizing the call style.
DelArp(fabric_gw_ip_.to_string(), "0a:0b:0c:0d:0e:0e", eth_name_);
client->WaitForIdle();
}
// Chained gateway routes a->b->c->d: every hop in the chain must share the
// NH of its gateway, both while d's ARP is resolved and after it is removed.
TEST_F(RouteTest, GatewayRoute_2) {
Ip4Address a = Ip4Address::from_string("4.4.4.4");
Ip4Address b = Ip4Address::from_string("5.5.5.5");
Ip4Address c = Ip4Address::from_string("6.6.6.6");
Ip4Address d = Ip4Address::from_string("7.7.7.7");
//Add gateway route a reachable via b, b reachable
//via c, c reachable via d.
AddArp(d.to_string().c_str(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(), c, 32, d);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(), b, 32, c);
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(), a, 32, b);
client->WaitForIdle();
EXPECT_TRUE(IsSameNH(a, 32, b, 32, Agent::GetInstance()->fabric_vrf_name()));
EXPECT_TRUE(IsSameNH(b, 32, c, 32, Agent::GetInstance()->fabric_vrf_name()));
EXPECT_TRUE(IsSameNH(c, 32, d, 32, Agent::GetInstance()->fabric_vrf_name()));
// Removing d's ARP must keep the chain internally consistent (all hops
// still share a common NH).
DelArp(d.to_string().c_str(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
EXPECT_TRUE(IsSameNH(a, 32, b, 32, Agent::GetInstance()->fabric_vrf_name()));
EXPECT_TRUE(IsSameNH(b, 32, c, 32, Agent::GetInstance()->fabric_vrf_name()));
EXPECT_TRUE(IsSameNH(c, 32, d, 32, Agent::GetInstance()->fabric_vrf_name()));
DeleteRoute(Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(), a, 32);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), a ,32));
DeleteRoute(Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(), b, 32);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), b, 32));
DeleteRoute(Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(), c, 32);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), c, 32));
}
// Add and delete a gateway route whose gateway address never resolves.
TEST_F(RouteTest, GatewayRoute_3) {
    const Ip4Address dest = Ip4Address::from_string("4.4.4.4");
    Ip4Address gateway = Ip4Address::from_string("5.5.5.254");

    // First pass: gateway with no resolving route.
    AddGatewayRoute(agent_->fabric_vrf_name(), dest, 32, gateway);
    client->WaitForIdle();
    InetUnicastRouteEntry *entry = RouteGet(agent_->fabric_vrf_name(), dest, 32);
    EXPECT_TRUE(entry != NULL);
    DeleteRoute(agent_->local_peer(), agent_->fabric_vrf_name(), dest, 32);
    client->WaitForIdle();
    EXPECT_FALSE(RouteFind(agent_->fabric_vrf_name(), dest, 32));

    // Second pass: repeat with a different unresolved gateway.
    gateway = Ip4Address::from_string("10.1.1.253");
    AddGatewayRoute(agent_->fabric_vrf_name(), dest, 32, gateway);
    client->WaitForIdle();
    entry = RouteGet(agent_->fabric_vrf_name(), dest, 32);
    EXPECT_TRUE(entry != NULL);
    DeleteRoute(agent_->local_peer(), agent_->fabric_vrf_name(), dest, 32);
    client->WaitForIdle();
    EXPECT_FALSE(RouteFind(agent_->fabric_vrf_name(), dest, 32));
}
// Unresolved-route bookkeeping: adding gateway routes over an unresolvable
// gateway must grow the table's unresolved list, and RESYNC of an
// unresolved route must not drop it from that list.
TEST_F(RouteTest, ResyncUnresolvedRoute_1) {
// There should be no unresolved route
InetUnicastAgentRouteTable *table =
Agent::GetInstance()->fabric_inet4_unicast_table();
EXPECT_EQ(table->unresolved_route_size(), 0);
Ip4Address gw = Ip4Address::from_string("1.1.1.2");
// Add an unresolved gateway route.
// Add a route to force RESYNC of unresolved route
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(), server1_ip_, 32,
gw);
EXPECT_TRUE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server1_ip_,
32));
// One unresolved route should be added
EXPECT_EQ(table->unresolved_route_size(), 1);
InetUnicastRouteEntry *rt =
RouteGet(Agent::GetInstance()->fabric_vrf_name(), server1_ip_, 32);
// Re-evaluating the unresolved route must leave it in the unresolved list.
InetUnicastAgentRouteTable::ReEvaluatePaths(Agent::GetInstance(),
rt->vrf()->GetName(),
rt->addr(),
rt->plen());
client->WaitForIdle();
EXPECT_EQ(table->unresolved_route_size(), 1);
// Add second route.
AddGatewayRoute(Agent::GetInstance()->fabric_vrf_name(), server2_ip_, 32,
gw);
EXPECT_TRUE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server2_ip_,
32));
WAIT_FOR(100, 1000, (table->unresolved_route_size() == 2));
DeleteRoute(Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(), server1_ip_, 32);
DeleteRoute(Agent::GetInstance()->local_peer(),
Agent::GetInstance()->fabric_vrf_name(), server2_ip_, 32);
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server1_ip_,
32));
EXPECT_FALSE(RouteFind(Agent::GetInstance()->fabric_vrf_name(), server2_ip_,
32));
}
// Longest-prefix-match lookup: install nested /8, /16, /24 resolve routes
// plus /32 ARP routes, then delete the most-specific entry one at a time
// and verify FindLPM falls back to the next-wider prefix each step.
TEST_F(RouteTest, FindLPM) {
InetUnicastRouteEntry *rt;
AddResolveRoute(lpm1_ip_, 8);
client->WaitForIdle();
AddResolveRoute(lpm2_ip_, 16);
client->WaitForIdle();
AddResolveRoute(lpm3_ip_, 24);
client->WaitForIdle();
AddArp(lpm4_ip_.to_string().c_str(), "0d:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
AddArp(lpm5_ip_.to_string().c_str(), "0d:0b:0c:0d:0e:0a", eth_name_.c_str());
client->WaitForIdle();
// /32 entry wins while present.
rt = Agent::GetInstance()->fabric_inet4_unicast_table()->FindLPM(lpm4_ip_);
EXPECT_EQ(lpm4_ip_, rt->addr());
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm4_ip_, 32);
client->WaitForIdle();
// After deleting the /32, lookup falls back to the /24 ...
rt = Agent::GetInstance()->fabric_inet4_unicast_table()->FindLPM(lpm4_ip_);
EXPECT_EQ(lpm3_ip_, rt->addr());
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm3_ip_, 24);
client->WaitForIdle();
// ... then the /16 ...
rt = Agent::GetInstance()->fabric_inet4_unicast_table()->FindLPM(lpm4_ip_);
EXPECT_EQ(lpm2_ip_, rt->addr());
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm2_ip_, 16);
client->WaitForIdle();
// ... and finally the /8.
rt = Agent::GetInstance()->fabric_inet4_unicast_table()->FindLPM(lpm4_ip_);
EXPECT_EQ(lpm1_ip_, rt->addr());
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm1_ip_, 8);
client->WaitForIdle();
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm5_ip_, 32);
client->WaitForIdle();
DelArp(lpm4_ip_.to_string().c_str(), "0d:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
DelArp(lpm5_ip_.to_string().c_str(), "0d:0b:0c:0d:0e:0a", eth_name_.c_str());
client->WaitForIdle();
}
// Routes over a service-VLAN nexthop: create a VM port, attach a service
// interface in vrf2, add a route in vrf1 that reuses the service-vlan NH
// and label, then tear everything down and verify cleanup.
TEST_F(RouteTest, VlanNHRoute_1) {
struct PortInfo input[] = {
{"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
};
client->Reset();
CreateVmportEnv(input, 1);
client->WaitForIdle();
EXPECT_TRUE(VmPortActive(input, 0));
EXPECT_TRUE(RouteFind(vrf_name_, local_vm_ip_, 32));
InetUnicastRouteEntry *rt = RouteGet(vrf_name_, local_vm_ip_, 32);
EXPECT_TRUE(rt != NULL);
if (rt) {
EXPECT_TRUE(rt->dest_vn_name() == "vn1");
}
// Add service interface-1
AddVrf("vrf2");
AddVmPortVrf("ser1", "2.2.2.1", 1);
AddLink("virtual-machine-interface-routing-instance", "ser1",
"routing-instance", "vrf2");
AddLink("virtual-machine-interface-routing-instance", "ser1",
"virtual-machine-interface", "vnet1");
client->WaitForIdle();
// Validate service vlan route
rt = RouteGet("vrf2", Ip4Address::from_string("2.2.2.1"), 32);
EXPECT_TRUE(rt != NULL);
// Add a route using NH created for service interface
client->WaitForIdle();
// Reuse the label of the service-vlan route fetched just above.
AddVlanNHRoute("vrf1", "2.2.2.0", 24, 1, 1, rt->GetActiveLabel(), "TestVn");
rt = RouteGet("vrf1", Ip4Address::from_string("2.2.2.0"), 24);
EXPECT_TRUE(rt != NULL);
if (rt) {
EXPECT_TRUE(rt->dest_vn_name() == "TestVn");
}
EXPECT_TRUE(rt->GetActivePath()->tunnel_bmap() == TunnelType::MplsType());
AddVmPortVrf("ser1", "2.2.2.1", 10);
client->WaitForIdle();
DelLink("virtual-machine-interface-routing-instance", "ser1",
"routing-instance", "vrf2");
DelLink("virtual-machine-interface-routing-instance", "ser1",
"virtual-machine-interface", "vnet1");
DelVmPortVrf("ser1");
// Poll until the service-vlan route is withdrawn (bounded retries).
int i = 0;
while (i++ < 50) {
rt = RouteGet("vrf2", Ip4Address::from_string("2.2.2.1"), 32);
if (rt == NULL) {
break;
}
client->WaitForIdle();
}
EXPECT_TRUE(rt == NULL);
DeleteVmportEnv(input, 1, true);
client->WaitForIdle();
i = 0;
while(RouteFind(vrf_name_, local_vm_ip_, 32) == true && ++i < 25) {
client->WaitForIdle();
}
EXPECT_FALSE(VmPortFind(input, 0));
DeleteRoute(NULL, "vrf1", Ip4Address::from_string("2.2.2.0"), 24);
client->WaitForIdle();
WAIT_FOR(100, 100,
(RouteGet("vrf1", Ip4Address::from_string("2.2.2.0"), 24) == NULL));
DelVrf("vrf2");
client->WaitForIdle();
}
// DB-table state stub: attaching it to a NextHop entry keeps the entry
// alive through delete so tests can still inspect it.
class TestNhState : public DBState {
public:
    TestNhState() : DBState(), dummy_(0) {}
    int dummy_;
};
// Minimal BGP-type peer stub used to inject routes from a fake peer.
class TestNhPeer : public Peer {
public:
    TestNhPeer() : Peer(BGP_PEER, "TestNH", false), dummy_(0) {}
    int dummy_;
};
// Add a route whose interface NH has been deleted but is held alive by a
// DB state: the route request must be handled without resurrecting or
// leaking the NH, and the NH must be freed once the state is cleared.
TEST_F(RouteTest, RouteToDeletedNH_1) {
struct PortInfo input[] = {
{"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
};
client->Reset();
CreateVmportEnv(input, 1);
client->WaitForIdle();
EXPECT_TRUE(VmPortActive(input, 0));
EXPECT_TRUE(RouteFind(vrf_name_, local_vm_ip_, 32));
InetUnicastRouteEntry *rt = RouteGet(vrf_name_, local_vm_ip_, 32);
EXPECT_TRUE(rt->dest_vn_name() == "vn1");
MacAddress vm_mac = MacAddress::FromString("00:00:00:01:01:01");
// Add state to NextHop so that entry is not freed on delete
DBTableBase::ListenerId id =
Agent::GetInstance()->nexthop_table()->Register(
boost::bind(&RouteTest::NhListener, this, _1, _2));
InterfaceNHKey key(new VmInterfaceKey(AgentKey::ADD_DEL_CHANGE,
MakeUuid(1), ""),
false, InterfaceNHFlags::INET4, vm_mac);
NextHop *nh =
static_cast<NextHop *>(Agent::GetInstance()->nexthop_table()->FindActiveEntry(&key));
// NOTE: raw new/delete here (cf. scoped_ptr in RtEntryReuse); the
// explicit delete order below (ClearState before delete) is intentional.
TestNhState *state = new TestNhState();
nh->SetState(Agent::GetInstance()->nexthop_table(), id, state);
DeleteVmportEnv(input, 1, false);
client->WaitForIdle();
// NH is gone from the active list but retained (held by state).
EXPECT_TRUE(Agent::GetInstance()->nexthop_table()->FindActiveEntry(&key) == NULL);
EXPECT_TRUE(Agent::GetInstance()->nexthop_table()->Find(&key, true) != NULL);
TestNhPeer *peer = new TestNhPeer();
Ip4Address addr = Ip4Address::from_string("1.1.1.10");
VnListType vn_list;
vn_list.insert("Test");
agent_->fabric_inet4_unicast_table()->AddLocalVmRouteReq(peer, "vrf1",
addr, 32,
MakeUuid(1), vn_list,
10,
SecurityGroupList(),
CommunityList(),
false,
PathPreference(),
Ip4Address(0),
EcmpLoadBalance(),
false, false);
client->WaitForIdle();
InetUnicastAgentRouteTable::DeleteReq(peer, "vrf1", addr, 32, NULL);
client->WaitForIdle();
nh->ClearState(Agent::GetInstance()->nexthop_table(), id);
client->WaitForIdle();
delete state;
delete peer;
Agent::GetInstance()->nexthop_table()->Unregister(id);
client->WaitForIdle();
DeleteVmportEnv(input, 1, true);
client->WaitForIdle();
// With the state cleared, the retained NH must now be fully freed.
EXPECT_TRUE(Agent::GetInstance()->nexthop_table()->Find(&key, true) == NULL);
int i = 0;
while(RouteFind(vrf_name_, local_vm_ip_, 32) == true && ++i < 25) {
client->WaitForIdle();
}
EXPECT_FALSE(VmPortFind(input, 0));
}
// Two fake peers add local-VM paths for the same /32; the VN and ACL are
// then deleted and one peer re-adds its path. Verifies route add/delete
// requests against a degraded config do not crash or leak paths.
TEST_F(RouteTest, RouteToDeletedNH_2) {
struct PortInfo input[] = {
{"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
};
client->Reset();
CreateVmportEnv(input, 1, 1);
client->WaitForIdle();
EXPECT_TRUE(VmPortActive(input, 0));
TestNhPeer *peer1 = new TestNhPeer();
TestNhPeer *peer2 = new TestNhPeer();
Ip4Address addr = Ip4Address::from_string("1.1.1.1");
VnListType vn_list;
vn_list.insert("Test");
agent_->fabric_inet4_unicast_table()->AddLocalVmRouteReq(peer1, "vrf1",
addr, 32,
MakeUuid(1),
vn_list, 10,
SecurityGroupList(),
CommunityList(),
false,
PathPreference(),
Ip4Address(0),
EcmpLoadBalance(),
false, false);
agent_->fabric_inet4_unicast_table()->AddLocalVmRouteReq(peer2, "vrf1",
addr, 32,
MakeUuid(1),
vn_list, 10,
SecurityGroupList(),
CommunityList(),
false,
PathPreference(),
Ip4Address(0),
EcmpLoadBalance(),
false, false);
client->WaitForIdle();
// Pull the ACL and VN out from under the interface, then re-add a path.
DelNode("access-control-list", "acl1");
DelNode("virtual-network", "vn1");
client->WaitForIdle();
agent_->fabric_inet4_unicast_table()->AddLocalVmRouteReq(peer1, "vrf1",
addr, 32,
MakeUuid(1),
vn_list, 10,
SecurityGroupList(),
CommunityList(),
false,
PathPreference(),
Ip4Address(0),
EcmpLoadBalance(),
false, false);
client->WaitForIdle();
InetUnicastAgentRouteTable::DeleteReq(peer1, "vrf1", addr, 32, NULL);
InetUnicastAgentRouteTable::DeleteReq(peer2, "vrf1", addr, 32, NULL);
client->WaitForIdle();
delete peer1;
delete peer2;
DeleteVmportEnv(input, 1, true, 1);
client->WaitForIdle();
WAIT_FOR(100, 100, (RouteFind("vrf1", addr, 32) == false));
EXPECT_FALSE(VmPortFind(input, 0));
}
// Route requests against an interface that has gone inactive (its VN was
// deleted): adds and deletes must still be processed cleanly.
TEST_F(RouteTest, RouteToInactiveInterface) {
struct PortInfo input[] = {
{"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
};
client->Reset();
CreateVmportEnv(input, 1);
client->WaitForIdle();
EXPECT_TRUE(VmPortActive(input, 0));
EXPECT_TRUE(RouteFind(vrf_name_, local_vm_ip_, 32));
InetUnicastRouteEntry *rt = RouteGet(vrf_name_, local_vm_ip_, 32);
EXPECT_TRUE(rt->dest_vn_name() == "vn1");
TestNhPeer *peer = new TestNhPeer();
Ip4Address addr = Ip4Address::from_string("1.1.1.10");
VnListType vn_list;
vn_list.insert("Test");
agent_->fabric_inet4_unicast_table()->AddLocalVmRouteReq(peer, "vrf1",
addr, 32,
MakeUuid(1),
vn_list, 10,
SecurityGroupList(),
CommunityList(),
false,
PathPreference(),
Ip4Address(0),
EcmpLoadBalance(),
false, false);
client->WaitForIdle();
// Deleting the VN deactivates the port; the second add below targets
// the now-inactive interface.
DelVn("vn1");
client->WaitForIdle();
EXPECT_TRUE(VmPortInactive(1));
agent_->fabric_inet4_unicast_table()->AddLocalVmRouteReq(peer, "vrf1",
addr, 32,
MakeUuid(1),
vn_list, 10,
SecurityGroupList(),
CommunityList(),
false,
PathPreference(),
Ip4Address(0),
EcmpLoadBalance(),
false, false);
client->WaitForIdle();
InetUnicastAgentRouteTable::DeleteReq(peer, "vrf1", addr, 32, NULL);
client->WaitForIdle();
delete peer;
DeleteVmportEnv(input, 1, true);
client->WaitForIdle();
int i = 0;
while(RouteFind(vrf_name_, local_vm_ip_, 32) == true && ++i < 25) {
client->WaitForIdle();
}
EXPECT_FALSE(VmPortFind(input, 0));
}
// Route-entry reuse: a route entry held by a DB state across delete must
// be the same object when the same prefix is re-added.
TEST_F(RouteTest, RtEntryReuse) {
client->Reset();
DBTableBase::ListenerId id =
Agent::GetInstance()->fabric_inet4_unicast_table()->Register(
boost::bind(&RouteTest::RtListener,
this, _1, _2));
InetUnicastRouteEntry *rt;
InetUnicastRouteEntry *rt_hold;
AddResolveRoute(lpm3_ip_, 24);
client->WaitForIdle();
AddArp(lpm4_ip_.to_string().c_str(), "0d:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
rt = Agent::GetInstance()->fabric_inet4_unicast_table()->FindLPM(lpm4_ip_);
EXPECT_EQ(lpm4_ip_, rt->addr());
// Pin the /32 entry with a state so deletion retains the object.
boost::scoped_ptr<TestRtState> state(new TestRtState());
rt->SetState(Agent::GetInstance()->fabric_inet4_unicast_table(), id, state.get());
rt_hold = rt;
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm4_ip_, 32);
client->WaitForIdle();
rt = Agent::GetInstance()->fabric_inet4_unicast_table()->FindLPM(lpm4_ip_);
EXPECT_EQ(lpm3_ip_, rt->addr());
AddArp(lpm4_ip_.to_string().c_str(), "0d:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
rt = Agent::GetInstance()->fabric_inet4_unicast_table()->FindLPM(lpm4_ip_);
EXPECT_EQ(lpm4_ip_, rt->addr());
// Re-added prefix must reuse the pinned entry object.
EXPECT_EQ(rt, rt_hold);
rt->ClearState(Agent::GetInstance()->fabric_inet4_unicast_table(), id);
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm4_ip_, 32);
client->WaitForIdle();
DeleteRoute(Agent::GetInstance()->local_peer(), Agent::GetInstance()->fabric_vrf_name(), lpm3_ip_, 24);
client->WaitForIdle();
DelArp(lpm4_ip_.to_string().c_str(), "0d:0b:0c:0d:0e:0f", eth_name_.c_str());
client->WaitForIdle();
Agent::GetInstance()->fabric_inet4_unicast_table()->Unregister(id);
}
// Stress add/delete of the same remote VM route back to back and verify
// the final state is "route absent".
TEST_F(RouteTest, ScaleRouteAddDel_1) {
    for (uint32_t iter = 0; iter < 1000; iter++) {
        AddRemoteVmRoute(remote_vm_ip_, fabric_gw_ip_, 32,
                         MplsTable::kStartLabel);
        InetUnicastAgentRouteTable::DeleteReq(NULL, "vrf1", remote_vm_ip_,
                                              32, NULL);
    }
    client->WaitForIdle(5);
    EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
}
// Stress add/delete but leave the final add in place: the surviving route
// must have a live (non-deleted) tunnel NH, and deleting it must free the
// tunnel NH as well.
TEST_F(RouteTest, ScaleRouteAddDel_2) {
uint32_t repeat = 1000;
uint32_t i = 0;
for (i = 0; i < repeat; i++) {
AddRemoteVmRoute(remote_vm_ip_, fabric_gw_ip_, 32,
MplsTable::kStartLabel);
// Skip the delete on the last iteration so the route survives.
if (i != (repeat - 1)) {
InetUnicastAgentRouteTable::DeleteReq
(Agent::GetInstance()->local_peer(), "vrf1", remote_vm_ip_,
32, NULL);
}
}
client->WaitForIdle(5);
EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));
InetUnicastRouteEntry *rt = RouteGet(vrf_name_, remote_vm_ip_, 32);
EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::TUNNEL);
EXPECT_TRUE(rt->GetActiveNextHop()->IsDeleted() == false);
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
TunnelNHKey key(Agent::GetInstance()->fabric_vrf_name(),
Agent::GetInstance()->router_id(), fabric_gw_ip_, false,
TunnelType::DefaultType());
EXPECT_FALSE(FindNH(&key));
}
//Test scale add and delete of composite routes
// Stress add/delete of an ECMP composite route; at the end the route and
// its composite NH must both be gone.
TEST_F(RouteTest, ScaleRouteAddDel_3) {
ComponentNHKeyList comp_nh_list;
int remote_server_ip = 0x0A0A0A0A;
int label = 16;
int nh_count = 3;
// Build three component tunnel NH keys with consecutive server IPs/labels.
for(int i = 0; i < nh_count; i++) {
ComponentNHKeyPtr comp_nh(new ComponentNHKey(label,
Agent::GetInstance()->fabric_vrf_name(),
Agent::GetInstance()->router_id(), Ip4Address(remote_server_ip++),
false, TunnelType::AllType()));
comp_nh_list.push_back(comp_nh);
label++;
}
SecurityGroupList sg_id_list;
for (uint32_t i = 0; i < 1000; i++) {
// NOTE(review): this passes `false` where ScaleRouteAddDel_4 passes
// `-1` for the same positional argument — confirm both spellings map
// to the intended value of that parameter.
EcmpTunnelRouteAdd(NULL, vrf_name_, remote_vm_ip_, 32,
comp_nh_list, false, "test", sg_id_list,
PathPreference());
client->WaitForIdle();
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
}
client->WaitForIdle(5);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
CompositeNHKey key(Composite::ECMP, true, comp_nh_list, vrf_name_);
EXPECT_FALSE(FindNH(&key));
}
//Test scale add and delete of composite routes
// Same stress as ScaleRouteAddDel_3 but the last add survives: the final
// route must have a live composite NH and carry the SG list; deleting it
// must free the composite NH.
TEST_F(RouteTest, ScaleRouteAddDel_4) {
ComponentNHKeyList comp_nh_list;
int remote_server_ip = 0x0A0A0A0A;
int label = 16;
int nh_count = 3;
for(int i = 0; i < nh_count; i++) {
ComponentNHKeyPtr comp_nh(new ComponentNHKey(label,
Agent::GetInstance()->fabric_vrf_name(),
Agent::GetInstance()->router_id(),
Ip4Address(remote_server_ip++),
false, TunnelType::AllType()));
comp_nh_list.push_back(comp_nh);
label++;
}
uint32_t repeat = 1000;
SecurityGroupList sg_id_list;
sg_id_list.push_back(1);
for (uint32_t i = 0; i < repeat; i++) {
EcmpTunnelRouteAdd(NULL, vrf_name_, remote_vm_ip_, 32,
comp_nh_list, -1, "test", sg_id_list,
PathPreference());
client->WaitForIdle();
// Skip the delete on the last iteration so the route survives.
if (i != (repeat - 1)) {
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
}
}
client->WaitForIdle(5);
EXPECT_TRUE(RouteFind(vrf_name_, remote_vm_ip_, 32));
InetUnicastRouteEntry *rt = RouteGet(vrf_name_, remote_vm_ip_, 32);
EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::COMPOSITE);
EXPECT_TRUE(rt->GetActiveNextHop()->IsDeleted() == false);
const SecurityGroupList &sg = rt->GetActivePath()->sg_list();
EXPECT_TRUE(sg[0] == 1);
DeleteRoute(NULL, vrf_name_, remote_vm_ip_, 32);
EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
CompositeNHKey key(Composite::ECMP, true, comp_nh_list, vrf_name_);
EXPECT_FALSE(FindNH(&key));
}
//Check path with highest preference gets priority
// Two interfaces share the same /32; traffic-seen events flip the active
// path to whichever interface most recently sourced traffic.
TEST_F(RouteTest, PathPreference) {
client->Reset();
struct PortInfo input[] = {
{"vnet3", 3, "1.1.1.1", "00:00:00:01:01:01", 3, 3},
{"vnet4", 4, "1.1.1.1", "00:00:00:01:01:01", 3, 4},
};
CreateVmportEnv(input, 2);
client->WaitForIdle();
VmInterface *vnet3 = VmInterfaceGet(3);
VmInterface *vnet4 = VmInterfaceGet(4);
Ip4Address ip = Ip4Address::from_string("1.1.1.1");
InetUnicastRouteEntry *rt = RouteGet("vrf3", ip, 32);
//Enqueue traffic seen from vnet4 interface
Agent::GetInstance()->oper_db()->route_preference_module()->
EnqueueTrafficSeen(ip, 32, vnet4->id(), vnet4->vrf()->vrf_id(),
MacAddress());
client->WaitForIdle();
EXPECT_TRUE(rt->GetActivePath()->peer() == vnet4->peer());
//Enqueue traffic seen from vnet3 interface
Agent::GetInstance()->oper_db()->route_preference_module()->
EnqueueTrafficSeen(ip, 32, vnet3->id(), vnet3->vrf()->vrf_id(),
MacAddress());
client->WaitForIdle();
//Check that path from vnet3 is preferred path
EXPECT_TRUE(rt->GetActivePath()->peer() == vnet3->peer());
DeleteVmportEnv(input, 2, true);
client->WaitForIdle();
}
//If ecmp flag is removed from instance ip, verify that path gets removed from
//ecmp peer path
//
// Toggles the same two-interface port set between ecmp and non-ecmp
// configuration and verifies the route's active nexthop flips between
// COMPOSITE (ecmp) and INTERFACE (non-ecmp).
TEST_F(RouteTest, EcmpPathDelete) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet3", 3, "1.1.1.1", "00:00:00:01:01:01", 3, 3},
        {"vnet4", 4, "1.1.1.1", "00:00:00:01:01:01", 3, 4},
    };
    CreateVmportWithEcmp(input, 2);
    client->WaitForIdle();
    Ip4Address ip = Ip4Address::from_string("1.1.1.1");
    InetUnicastRouteEntry *rt = RouteGet("vrf3", ip, 32);
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::COMPOSITE);
    // Re-create the same ports without the ecmp flag.
    CreateVmportEnv(input, 2);
    client->WaitForIdle();
    //One of the interface becomes active path
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::INTERFACE);
    // Turn ecmp back on; composite nexthop must return.
    CreateVmportWithEcmp(input, 2);
    client->WaitForIdle();
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::COMPOSITE);
    DeleteVmportEnv(input, 2, true);
    client->WaitForIdle();
    EXPECT_TRUE(RouteGet("vrf3", ip, 32) == NULL);
    //Make sure vrf and all routes are deleted
    EXPECT_TRUE(VrfFind("vrf3", true) == false);
}
// Enqueue a unicast route ADD on a VRF that has already been deleted and
// verify the agent handles it gracefully (no crash). The scheduler is
// stopped so the request is guaranteed to be processed only after the
// last VRF reference (vrf_ref) has been released.
TEST_F(RouteTest, Enqueue_uc_route_add_on_deleted_vrf) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };
    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    // Hold a reference so the VRF object outlives the port deletion.
    VrfEntryRef vrf_ref = VrfGet(vrf_name_.c_str());
    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    TaskScheduler::GetInstance()->Stop();
    Inet4TunnelRouteAdd(NULL, vrf_name_, remote_vm_ip_, 32, server1_ip_,
                        TunnelType::AllType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    // Drop the last reference before the queued request runs.
    vrf_ref = NULL;
    TaskScheduler::GetInstance()->Start();
    client->WaitForIdle();
}
// Enqueue a unicast route DELETE on a VRF that has already been deleted
// and verify the agent handles it gracefully (no crash). Same
// stop/enqueue/release/start pattern as the ADD variant above.
TEST_F(RouteTest, Enqueue_uc_route_del_on_deleted_vrf) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };
    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    // Hold a reference so the VRF object outlives the port deletion.
    VrfEntryRef vrf_ref = VrfGet(vrf_name_.c_str());
    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    TaskScheduler::GetInstance()->Stop();
    InetUnicastAgentRouteTable::DeleteReq(NULL, vrf_name_, remote_vm_ip_, 32,
                                          NULL);
    // Drop the last reference before the queued request runs.
    vrf_ref = NULL;
    TaskScheduler::GetInstance()->Start();
    client->WaitForIdle();
}
// Enqueue a multicast route ADD (255.255.255.255 broadcast) on a VRF that
// has already been deleted and verify the agent handles it gracefully.
TEST_F(RouteTest, Enqueue_mc_route_add_on_deleted_vrf) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };
    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    // Hold a reference so the VRF object outlives the port deletion.
    VrfEntryRef vrf_ref = VrfGet(vrf_name_.c_str());
    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    TaskScheduler::GetInstance()->Stop();
    // Empty component list; only the enqueue-on-deleted-vrf path matters.
    ComponentNHKeyList component_nh_key_list;
    Inet4MulticastAgentRouteTable::AddMulticastRoute(vrf_name_, "vn1",
                                   Ip4Address::from_string("0.0.0.0"),
                                   Ip4Address::from_string("255.255.255.255"),
                                   component_nh_key_list);
    // Drop the last reference before the queued request runs.
    vrf_ref = NULL;
    TaskScheduler::GetInstance()->Start();
    client->WaitForIdle();
}
// Enqueue a multicast route DELETE on a VRF that has already been deleted
// and verify the agent handles it gracefully.
TEST_F(RouteTest, Enqueue_mc_route_del_on_deleted_vrf) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };
    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    // Hold a reference so the VRF object outlives the port deletion.
    VrfEntryRef vrf_ref = VrfGet(vrf_name_.c_str());
    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    TaskScheduler::GetInstance()->Stop();
    Inet4MulticastAgentRouteTable::DeleteMulticastRoute(vrf_name_,
                                   Ip4Address::from_string("0.0.0.0"),
                                   Ip4Address::from_string("255.255.255.255"));
    // Drop the last reference before the queued request runs.
    vrf_ref = NULL;
    TaskScheduler::GetInstance()->Start();
    client->WaitForIdle();
}
// Verify that the subnet service IP (gateway) carried on a local VM
// route's active path tracks the IPAM configuration, including when the
// IPAM gateway address is subsequently changed.
TEST_F(RouteTest, SubnetGwForRoute_1) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };
    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
    };
    AddIPAM("vn1", ipam_info, 1, NULL, "vdns1");
    client->WaitForIdle();
    //Check if the subnet gateway is set to 1.1.1.200 for a route
    Ip4Address vm_ip = Ip4Address::from_string("1.1.1.10");
    InetUnicastRouteEntry *rt = RouteGet("vrf1", vm_ip, 32);
    Ip4Address subnet_service_ip = Ip4Address::from_string("1.1.1.200");
    EXPECT_TRUE(rt->GetActivePath()->subnet_service_ip() == subnet_service_ip);
    //Update ipam to have different gw address
    IpamInfo ipam_info2[] = {
        {"1.1.1.0", 24, "1.1.1.201", true},
    };
    AddIPAM("vn1", ipam_info2, 1, NULL, "vdns1");
    client->WaitForIdle();
    // The path must pick up the new gateway without route re-add.
    subnet_service_ip = Ip4Address::from_string("1.1.1.201");
    EXPECT_TRUE(rt->GetActivePath()->subnet_service_ip() == subnet_service_ip);
    DelIPAM("vn1", "vdns1");
    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    //Make sure vrf and all routes are deleted
    EXPECT_TRUE(VrfFind("vrf1", true) == false);
}
//Enqueue a path preference change for
//non existent path and make sure, path change is not
//notified
//
// A RESYNC keyed on a transient stack peer that owns no path on the
// route must be a no-op: the route survives and nothing crashes.
TEST_F(RouteTest, PathPreference_1) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };
    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    // Stack peer that has no path on the route below.
    Peer peer(Peer::LOCAL_VM_PORT_PEER, "test_peer", true);
    Ip4Address ip = Ip4Address::from_string("1.1.1.10");
    //Enqueue path change for non existent path
    DBRequest req(DBRequest::DB_ENTRY_ADD_CHANGE);
    InetUnicastRouteKey *rt_key =
        new InetUnicastRouteKey(&peer, "vrf1", ip, 32);
    rt_key->sub_op_ = AgentKey::RESYNC;
    req.key.reset(rt_key);
    req.data.reset(new PathPreferenceData(PathPreference()));
    AgentRouteTable *table =
        agent_->vrf_table()->GetInet4UnicastRouteTable("vrf1");
    table->Enqueue(&req);
    client->WaitForIdle();
    // Route must still exist, untouched by the bogus resync.
    EXPECT_TRUE(RouteFind("vrf1", ip, 32) == true);
    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    //Make sure vrf and all routes are deleted
    EXPECT_TRUE(VrfFind("vrf1", true) == false);
}
// Enqueue a route resync for a peer which does not
// have a path to route make sure, path change is not
// notified
//
// Like PathPreference_1 above, but the RESYNC carries InetInterfaceRoute
// data. The route must be unaffected and the agent must not crash.
TEST_F(RouteTest, RouteResync_1) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:00:01:01:01", 1, 1},
    };
    client->Reset();
    CreateVmportEnv(input, 1);
    client->WaitForIdle();
    // Stack peer that owns no path on the route below.
    Peer peer(Peer::LOCAL_VM_PORT_PEER, "test_peer", true);
    Ip4Address ip = Ip4Address::from_string("1.1.1.10");
    //Enqueue path change for non existent path
    DBRequest req(DBRequest::DB_ENTRY_ADD_CHANGE);
    InetUnicastRouteKey *rt_key =
        new InetUnicastRouteKey(&peer, "vrf1", ip, 32);
    rt_key->sub_op_ = AgentKey::RESYNC;
    req.key.reset(rt_key);
    InetInterfaceKey intf_key("vnet1");
    VnListType vn_list;
    vn_list.insert("vn1");
    // Fix: 'peer' is a stack object, not a pointer, so member access must
    // use '.' — the previous 'peer->sequence_number()' did not compile.
    req.data.reset(new InetInterfaceRoute(intf_key, 1, TunnelType::GREType(),
                                          vn_list, peer.sequence_number()));
    AgentRouteTable *table =
        agent_->vrf_table()->GetInet4UnicastRouteTable("vrf1");
    table->Enqueue(&req);
    client->WaitForIdle();
    // Route must still exist, untouched by the bogus resync.
    EXPECT_TRUE(RouteFind("vrf1", ip, 32) == true);
    DeleteVmportEnv(input, 1, true);
    client->WaitForIdle();
    //Make sure vrf and all routes are deleted
    EXPECT_TRUE(VrfFind("vrf1", true) == false);
}
//Add IPAM and then add a smaller subnet as remote route.
//Flood flag should be set in both.
//
// Order: IPAM first, then the covered remote /29. Both must end up with
// the flood flag (ipam_subnet_route); removing the IPAM clears it on the
// remote route.
TEST_F(RouteTest, SubnetRoute_Flood_1) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
        {"2.2.2.100", 28, "2.2.2.200", true},
        {"3.3.3.0", 16, "3.3.30.200", true},
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 3);
    client->WaitForIdle();
    InetUnicastRouteEntry *rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    InetUnicastRouteEntry *rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    InetUnicastRouteEntry *rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt1 != NULL);
    EXPECT_TRUE(rt2 != NULL);
    EXPECT_TRUE(rt3 != NULL);
    EXPECT_TRUE(rt1->ipam_subnet_route() == true);
    EXPECT_TRUE(rt2->ipam_subnet_route() == true);
    EXPECT_TRUE(rt3->ipam_subnet_route() == true);
    //Now add remote route
    AddRemoteVmRoute(remote_subnet_ip_, fabric_gw_ip_, 29, MplsTable::kStartLabel);
    EXPECT_TRUE(RouteFind(vrf_name_, remote_subnet_ip_, 29));
    InetUnicastRouteEntry *rt = RouteGet(vrf_name_, remote_subnet_ip_, 29);
    // Remote /29 is covered by an IPAM subnet, so it inherits the flag.
    EXPECT_TRUE(rt->ipam_subnet_route() == true);
    //On IPAM going off remote route should remove its flood flag.
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    EXPECT_FALSE(rt->ipam_subnet_route());
    client->Reset();
    DeleteRoute(NULL, vrf_name_, remote_subnet_ip_, 29);
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
}
//Add remote route and then add IPAM subnet.
//Flood flag should be set in both.
//
// Reverse order of SubnetRoute_Flood_1: remote /29 first (no flood flag
// yet), then the IPAM arrives and the existing remote route must pick up
// the flood flag retroactively.
TEST_F(RouteTest, SubnetRoute_Flood_2) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
        {"2.2.2.100", 28, "2.2.2.200", true},
        {"3.3.3.0", 16, "3.3.30.200", true},
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    //Now add remote route
    AddRemoteVmRoute(remote_subnet_ip_, fabric_gw_ip_, 29, MplsTable::kStartLabel);
    EXPECT_TRUE(RouteFind(vrf_name_, remote_subnet_ip_, 29));
    InetUnicastRouteEntry *rt = RouteGet(vrf_name_, remote_subnet_ip_, 29);
    // No IPAM yet, so no flood flag on the remote route.
    EXPECT_FALSE(rt->ipam_subnet_route());
    AddIPAM("vn1", ipam_info, 3);
    client->WaitForIdle();
    InetUnicastRouteEntry *rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    InetUnicastRouteEntry *rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    InetUnicastRouteEntry *rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt1 != NULL);
    EXPECT_TRUE(rt2 != NULL);
    EXPECT_TRUE(rt3 != NULL);
    EXPECT_TRUE(rt1->ipam_subnet_route() == true);
    EXPECT_TRUE(rt2->ipam_subnet_route() == true);
    EXPECT_TRUE(rt3->ipam_subnet_route() == true);
    EXPECT_TRUE(RouteFind(vrf_name_, remote_subnet_ip_, 29));
    rt = RouteGet(vrf_name_, remote_subnet_ip_, 29);
    // Existing remote route now inherits the flag from the covering IPAM.
    EXPECT_TRUE(rt->ipam_subnet_route());
    //On IPAM going off remote route should remove its flood flag.
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    EXPECT_FALSE(rt->ipam_subnet_route());
    client->Reset();
    DeleteRoute(NULL, vrf_name_, remote_subnet_ip_, 29);
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
}
// A default route (0.0.0.0/0) added as a remote tunnel route must get a
// TUNNEL nexthop and must NOT be treated as an IPAM subnet route.
TEST_F(RouteTest, null_ip_subnet_add) {
    Ip4Address null_subnet_ip;
    null_subnet_ip = Ip4Address::from_string("0.0.0.0");
    AddRemoteVmRoute(null_subnet_ip, fabric_gw_ip_, 0, MplsTable::kStartLabel);
    EXPECT_TRUE(RouteFind(vrf_name_, null_subnet_ip, 0));
    InetUnicastRouteEntry *rt = RouteGet(vrf_name_, null_subnet_ip, 0);
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::TUNNEL);
    EXPECT_FALSE(rt->ipam_subnet_route());
    DeleteRoute(NULL, vrf_name_, null_subnet_ip, 0);
    EXPECT_FALSE(RouteFind(vrf_name_, null_subnet_ip, 0));
}
// Test case checks for arp and proxy flag setting
// on different routes.
//
// Expectations exercised:
//  - Local VM /32 route: proxy-arp off; flood decision is left to ksync.
//  - IPAM subnet routes: flood (ipam_subnet_route) on, proxy-arp off, and
//    the flags survive a BGP peer adding/removing a path for the prefix.
//  - Covered (smaller) prefixes inherit the covering IPAM's flags; they
//    toggle when the IPAM is removed/re-added.
//  - Prefixes outside any IPAM: proxy-arp on, flood off.
TEST_F(RouteTest, route_arp_flags_1) {
    client->Reset();
    BgpPeer *peer = CreateBgpPeer("127.0.0.1", "remote");
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
        {"2.2.2.100", 28, "2.2.2.200", true},
        {"3.3.3.0", 16, "3.3.30.200", true},
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 3);
    client->WaitForIdle();
    //Local unicast route arp proxy and flood check
    InetUnicastRouteEntry *uc_rt = RouteGet(vrf_name_,
                                            Ip4Address::from_string("1.1.1.1"),
                                            32);
    //In ksync binding decides if flood flag i.e. ipam_subnet_route needs to be
    //enabled or not.
    EXPECT_FALSE(uc_rt->ipam_subnet_route());
    EXPECT_FALSE(uc_rt->proxy_arp());
    //Subnet routes check
    InetUnicastRouteEntry *rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    InetUnicastRouteEntry *rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    InetUnicastRouteEntry *rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt1 != NULL);
    EXPECT_TRUE(rt2 != NULL);
    EXPECT_TRUE(rt3 != NULL);
    //All ipam added gateway routes should have flood enabled and proxy disabled
    //for arp.
    EXPECT_TRUE(rt1->ipam_subnet_route());
    EXPECT_TRUE(rt2->ipam_subnet_route());
    EXPECT_TRUE(rt3->ipam_subnet_route());
    EXPECT_FALSE(rt1->proxy_arp());
    EXPECT_FALSE(rt2->proxy_arp());
    EXPECT_FALSE(rt3->proxy_arp());
    //Add remote route for ipam gw (1.1.1.0/24) with bgp peer, route flag should
    //be retained.
    Inet4TunnelRouteAdd(peer, vrf_name_, subnet_vm_ip_1_, 24, server1_ip_,
                        TunnelType::MplsType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    client->WaitForIdle();
    rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    EXPECT_TRUE(rt1->GetPathList().size() == 2);
    EXPECT_TRUE(rt1->ipam_subnet_route());
    EXPECT_FALSE(rt1->proxy_arp());
    //Delete 1.1.1.0/24 from BGP peer, no change in flags
    DeleteRoute(peer, vrf_name_, subnet_vm_ip_1_, 24);
    client->WaitForIdle();
    rt1 = RouteGet(vrf_name_, subnet_vm_ip_1_, 24);
    EXPECT_TRUE(rt1->GetPathList().size() == 1);
    EXPECT_TRUE(rt1->ipam_subnet_route());
    EXPECT_FALSE(rt1->proxy_arp());
    //Add remote route for ipam gw (3.3.3.0/16) with bgp peer, route flag should
    //be retained.
    Inet4TunnelRouteAdd(peer, vrf_name_, subnet_vm_ip_3_, 16, server1_ip_,
                        TunnelType::MplsType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    client->WaitForIdle();
    rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt3->GetPathList().size() == 2);
    EXPECT_TRUE(rt3->ipam_subnet_route());
    EXPECT_FALSE(rt3->proxy_arp());
    //Add another smaller subnet, should inherit 3.3.0.0/16 flags
    Ip4Address smaller_subnet_3;
    smaller_subnet_3 = Ip4Address::from_string("3.3.3.3");
    Inet4TunnelRouteAdd(peer, vrf_name_, smaller_subnet_3, 28, server1_ip_,
                        TunnelType::MplsType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    client->WaitForIdle();
    rt3 = RouteGet(vrf_name_, smaller_subnet_3, 28);
    EXPECT_TRUE(rt3->ipam_subnet_route());
    EXPECT_FALSE(rt3->proxy_arp());
    //Delete Ipam path for 3.3.3.0/16 and there shud be only one path
    //i.e. from remote peer and flags should be toggled. Proxy - yes,
    //flood - no.
    IpamInfo ipam_info_2[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
        {"2.2.2.100", 28, "2.2.2.200", true},
    };
    AddIPAM("vn1", ipam_info_2, 2);
    client->WaitForIdle();
    rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt3->GetPathList().size() == 1);
    EXPECT_FALSE(rt3->ipam_subnet_route());
    EXPECT_FALSE(rt3->proxy_arp());
    //Smaller subnet 3.3.3.3/28 also toggles
    rt3 = RouteGet(vrf_name_, smaller_subnet_3, 28);
    EXPECT_FALSE(rt3->ipam_subnet_route());
    EXPECT_FALSE(rt3->proxy_arp());
    //Add back IPAM 3.3.0.0/16 and see flags are restored.
    AddIPAM("vn1", ipam_info, 3);
    client->WaitForIdle();
    rt3 = RouteGet(vrf_name_, subnet_vm_ip_3_, 16);
    EXPECT_TRUE(rt3->ipam_subnet_route());
    EXPECT_FALSE(rt3->proxy_arp());
    //Smaller subnet 3.3.3.3/28 also toggles
    rt3 = RouteGet(vrf_name_, smaller_subnet_3, 28);
    EXPECT_TRUE(rt3->ipam_subnet_route());
    EXPECT_FALSE(rt3->proxy_arp());
    //Delete the 3.3.3.0/16 remote route
    DeleteRoute(peer, vrf_name_, subnet_vm_ip_3_, 16);
    DeleteRoute(peer, vrf_name_, smaller_subnet_3, 28);
    client->WaitForIdle();
    //Add and delete a super net 2.2.2.0/24 and see 2.2.2.100/28 is not impacted
    Ip4Address subnet_supernet_2;
    subnet_supernet_2 = Ip4Address::from_string("2.2.2.0");
    Inet4TunnelRouteAdd(peer, vrf_name_, subnet_supernet_2, 24, server1_ip_,
                        TunnelType::MplsType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    client->WaitForIdle();
    rt2 = RouteGet(vrf_name_, subnet_supernet_2, 24);
    EXPECT_FALSE(rt2->ipam_subnet_route());
    EXPECT_TRUE(rt2->proxy_arp());
    rt2 = RouteGet(vrf_name_, subnet_vm_ip_2_, 28);
    EXPECT_TRUE(rt2->ipam_subnet_route());
    EXPECT_FALSE(rt2->proxy_arp());
    //Delete super net
    //Fix: delete the prefix that was added above (subnet_supernet_2,
    //2.2.2.0/24) instead of subnet_vm_ip_2_ (2.2.2.100), so the supernet
    //route is actually removed.
    DeleteRoute(peer, vrf_name_, subnet_supernet_2, 24);
    client->WaitForIdle();
    EXPECT_TRUE(rt2->ipam_subnet_route());
    EXPECT_FALSE(rt2->proxy_arp());
    //Add any arbitrary remote route outside ipam say 4.4.4.0/24
    //proxy - yes, flood - no
    Ip4Address subnet_vm_ip_non_ipam;
    subnet_vm_ip_non_ipam = Ip4Address::from_string("4.4.4.0");
    Inet4TunnelRouteAdd(peer, vrf_name_, subnet_vm_ip_non_ipam, 24, server1_ip_,
                        TunnelType::MplsType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    client->WaitForIdle();
    InetUnicastRouteEntry *rt4 = RouteGet(vrf_name_, subnet_vm_ip_non_ipam, 24);
    EXPECT_FALSE(rt4->ipam_subnet_route());
    EXPECT_TRUE(rt4->proxy_arp());
    //Add another smaller subnet in 4.4.4.0/24 say 4.4.4.10/28
    //proxy - yes, flood -no
    Ip4Address subnet_vm_ip_non_ipam_2;
    subnet_vm_ip_non_ipam_2 = Ip4Address::from_string("4.4.4.10");
    Inet4TunnelRouteAdd(peer, vrf_name_, subnet_vm_ip_non_ipam_2, 28,
                        server1_ip_,
                        TunnelType::MplsType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    client->WaitForIdle();
    InetUnicastRouteEntry *rt5 = RouteGet(vrf_name_, subnet_vm_ip_non_ipam_2, 28);
    EXPECT_FALSE(rt5->ipam_subnet_route());
    EXPECT_TRUE(rt5->proxy_arp());
    //Add super net 4.4.0.0/16
    Ip4Address subnet_vm_ip_non_ipam_3;
    subnet_vm_ip_non_ipam_3 = Ip4Address::from_string("4.4.0.0");
    Inet4TunnelRouteAdd(peer, vrf_name_, subnet_vm_ip_non_ipam_3, 16,
                        server1_ip_,
                        TunnelType::MplsType(), MplsTable::kStartLabel,
                        vrf_name_,
                        SecurityGroupList(), PathPreference());
    client->WaitForIdle();
    InetUnicastRouteEntry *rt6 = RouteGet(vrf_name_, subnet_vm_ip_non_ipam_3, 16);
    EXPECT_FALSE(rt6->ipam_subnet_route());
    EXPECT_TRUE(rt6->proxy_arp());
    //Delete all these external prefixes 4.4.0.0 and keep checking flags dont
    //change
    DeleteRoute(peer, vrf_name_, subnet_vm_ip_non_ipam, 24);
    client->WaitForIdle();
    EXPECT_FALSE(rt5->ipam_subnet_route());
    EXPECT_TRUE(rt5->proxy_arp());
    EXPECT_FALSE(rt6->ipam_subnet_route());
    EXPECT_TRUE(rt6->proxy_arp());
    DeleteRoute(peer, vrf_name_, subnet_vm_ip_non_ipam_2, 28);
    client->WaitForIdle();
    EXPECT_FALSE(rt6->ipam_subnet_route());
    EXPECT_TRUE(rt6->proxy_arp());
    DeleteRoute(peer, vrf_name_, subnet_vm_ip_non_ipam_3, 16);
    client->WaitForIdle();
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
    DeleteBgpPeer(peer);
    client->WaitForIdle();
}
// Three interfaces share one IP in non-ecmp mode (3 paths, INTERFACE
// nexthop); enabling ecmp must add an ecmp-peer path (4 paths, COMPOSITE)
// and disabling it must revert.
TEST_F(RouteTest, NonEcmpToEcmpConversion) {
    struct PortInfo input2[] = {
        {"vnet11", 11, "2.1.1.1", "00:00:00:01:01:01", 8, 11},
        {"vnet12", 12, "2.1.1.1", "00:00:00:02:02:01", 8, 12},
        {"vnet13", 13, "2.1.1.1", "00:00:00:02:02:01", 8, 13},
    };
    //Add interface in non ecmp mode
    CreateVmportEnv(input2, 3);
    client->WaitForIdle();
    Ip4Address ip = Ip4Address::from_string("2.1.1.1");
    InetUnicastRouteEntry *rt = RouteGet("vrf8", ip, 32);
    EXPECT_TRUE(rt != NULL);
    EXPECT_TRUE(rt->GetPathList().size() == 3);
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::INTERFACE);
    // Switch to ecmp: extra ecmp-peer path appears, composite NH active.
    CreateVmportWithEcmp(input2, 3);
    client->WaitForIdle();
    EXPECT_TRUE(rt->GetPathList().size() == 4);
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::COMPOSITE);
    // Switch back to non-ecmp: ecmp-peer path removed again.
    CreateVmportEnv(input2, 3);
    client->WaitForIdle();
    EXPECT_TRUE(rt->GetPathList().size() == 3);
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::INTERFACE);
    DeleteVmportEnv(input2, 3, true);
    client->WaitForIdle();
    EXPECT_TRUE(VrfFind("vrf8", true) == false);
}
// With DHCP enabled in the IPAM, the MAC-VM binding path on the bridge
// route must have flood_dhcp == false (DHCP served locally, not flooded).
TEST_F(RouteTest, Dhcp_enabled_ipam) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},   // last field: dhcp_enable
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 1);
    client->WaitForIdle();
    //Find Bridge route
    BridgeRouteEntry *rt =
        L2RouteGet("vrf1",
                   MacAddress::FromString("00:00:00:01:01:01"),
                   Ip4Address::from_string("1.1.1.1"));
    const AgentPath *path = rt->FindMacVmBindingPath();
    const MacVmBindingPath *dhcp_path = dynamic_cast<const MacVmBindingPath *>(path);
    EXPECT_TRUE(dhcp_path != NULL);
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(1));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == false);
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
}
// With DHCP disabled in the IPAM, the MAC-VM binding path on the bridge
// route must have flood_dhcp == true (DHCP requests get flooded).
TEST_F(RouteTest, Dhcp_disabled_ipam) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", false},   // dhcp_enable = false
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 1);
    client->WaitForIdle();
    //Find Bridge route
    BridgeRouteEntry *rt =
        L2RouteGet("vrf1",
                   MacAddress::FromString("00:00:00:01:01:01"),
                   Ip4Address::from_string("1.1.1.1"));
    const MacVmBindingPath *dhcp_path =
        dynamic_cast<const MacVmBindingPath *>(rt->FindMacVmBindingPath());
    EXPECT_TRUE(dhcp_path != NULL);
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(1));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == true);
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
}
// Toggle the IPAM's DHCP flag (enabled -> disabled -> enabled) and verify
// flood_dhcp on BOTH interfaces' MAC-VM binding paths tracks the flag.
TEST_F(RouteTest, Dhcp_mode_toggled_ipam) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
        {"vnet2", 2, "1.1.1.2", "00:00:00:01:01:02", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},   // dhcp enabled
    };
    client->Reset();
    CreateVmportEnv(input, 2, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 1);
    client->WaitForIdle();
    //Find Bridge route
    BridgeRouteEntry *rt =
        L2RouteGet("vrf1",
                   MacAddress::FromString("00:00:00:01:01:01"),
                   Ip4Address::from_string("1.1.1.1"));
    const AgentPath *path = rt->FindMacVmBindingPath();
    const MacVmBindingPath *dhcp_path = dynamic_cast<const MacVmBindingPath *>(path);
    // DHCP enabled: no flooding on either interface.
    EXPECT_TRUE(dhcp_path != NULL);
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(1));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == false);
    rt = L2RouteGet("vrf1",
                    MacAddress::FromString("00:00:00:01:01:02"),
                    Ip4Address::from_string("1.1.1.2"));
    dhcp_path = dynamic_cast<const MacVmBindingPath *>(rt->FindMacVmBindingPath());
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(2));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == false);
    //Toggle to disable
    IpamInfo ipam_info_disabled[] = {
        {"1.1.1.0", 24, "1.1.1.200", false},
    };
    AddIPAM("vn1", ipam_info_disabled, 1);
    client->WaitForIdle();
    // DHCP disabled: both interfaces must now flood DHCP.
    rt = L2RouteGet("vrf1",
                    MacAddress::FromString("00:00:00:01:01:01"),
                    Ip4Address::from_string("1.1.1.1"));
    dhcp_path = dynamic_cast<const MacVmBindingPath *>(rt->FindMacVmBindingPath());
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(1));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == true);
    rt = L2RouteGet("vrf1",
                    MacAddress::FromString("00:00:00:01:01:02"),
                    Ip4Address::from_string("1.1.1.2"));
    dhcp_path = dynamic_cast<const MacVmBindingPath *>(rt->FindMacVmBindingPath());
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(2));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == true);
    //Toggle to enable
    AddIPAM("vn1", ipam_info, 1);
    client->WaitForIdle();
    // Back to enabled: flooding off again on both interfaces.
    rt = L2RouteGet("vrf1",
                    MacAddress::FromString("00:00:00:01:01:01"),
                    Ip4Address::from_string("1.1.1.1"));
    dhcp_path = dynamic_cast<const MacVmBindingPath *>(rt->FindMacVmBindingPath());
    EXPECT_TRUE(dhcp_path != NULL);
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(1));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == false);
    rt = L2RouteGet("vrf1",
                    MacAddress::FromString("00:00:00:01:01:02"),
                    Ip4Address::from_string("1.1.1.2"));
    dhcp_path = dynamic_cast<const MacVmBindingPath *>(rt->FindMacVmBindingPath());
    EXPECT_TRUE(dhcp_path->vm_interface()->GetUuid() == MakeUuid(2));
    EXPECT_TRUE(dhcp_path->nexthop()->GetType() == NextHop::DISCARD);
    EXPECT_TRUE(dhcp_path->flood_dhcp() == false);
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    DeleteVmportEnv(input, 2, 1, 0);
    client->WaitForIdle();
}
//Double delete ARP route and verify that ARP NH
//get deleted, since we always enqueue RESYNC for arp NH change
//from ARP route deletion path
TEST_F(RouteTest, ArpRouteDelete) {
    ArpNHKey key(Agent::GetInstance()->fabric_vrf_name(), server1_ip_, false);
    AddArp(server1_ip_.to_string().c_str(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
    client->WaitForIdle();
    // ARP entry created a valid ARP nexthop.
    EXPECT_TRUE(FindNH(&key));
    EXPECT_TRUE(GetNH(&key)->IsValid() == true);
    //Delete Remote VM route
    DelArp(server1_ip_.to_string(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
    client->WaitForIdle();
    EXPECT_FALSE(RouteFind(vrf_name_, server1_ip_, 32));
    // Second delete of the same ARP entry must be harmless and the
    // nexthop must end up removed.
    DelArp(server1_ip_.to_string(), "0a:0b:0c:0d:0e:0f", eth_name_.c_str());
    client->WaitForIdle();
    EXPECT_FALSE(RouteFind(vrf_name_, server1_ip_, 32));
    EXPECT_FALSE(FindNH(&key));
}
// Bring a BGP/XMPP channel to NOT_READY after it has contributed EVPN
// nexthops and verify teardown completes cleanly (paths from the channel
// are withdrawn and cleanup does not crash).
TEST_F(RouteTest, verify_channel_delete_results_in_path_delete) {
    client->Reset();
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.1", "00:00:00:01:01:01", 1, 1},
    };
    IpamInfo ipam_info[] = {
        {"1.1.1.0", 24, "1.1.1.200", true},
    };
    client->Reset();
    CreateVmportEnv(input, 1, 0);
    client->WaitForIdle();
    AddIPAM("vn1", ipam_info, 1);
    client->WaitForIdle();
    BgpPeer *peer = CreateBgpPeer("127.0.0.1", "remote");
    FillEvpnNextHop(peer, "vrf1", 1000, TunnelType::MplsType());
    client->WaitForIdle();
    //Get Channel and delete it.
    AgentXmppChannel *ch = peer->GetAgentXmppChannel();
    XmppChannelMock *xmpp_channel = static_cast<XmppChannelMock *>
        (ch->GetXmppChannel());
    AgentXmppChannel::HandleAgentXmppClientChannelEvent(ch, xmps::NOT_READY);
    client->WaitForIdle();
    client->Reset();
    DelIPAM("vn1");
    client->WaitForIdle();
    FlushEvpnNextHop(peer, "vrf1", 0);
    DeleteVmportEnv(input, 1, 1, 0);
    client->WaitForIdle();
    DeleteBgpPeer(NULL);
    // NOTE(review): the channel and mock are deleted manually here because
    // DeleteBgpPeer was called with NULL — presumably the test owns these
    // objects once the channel event fired; confirm ownership if modified.
    delete ch;
    delete xmpp_channel;
    client->WaitForIdle();
}
// https://bugs.launchpad.net/juniperopenstack/+bug/1458194
//
// Regression test: an ECMP tunnel route whose first component points at
// an MPLS label that is concurrently re-pointed from a local-ecmp
// composite NH to an interface NH must not crash or leak; the route and
// composite NH must be fully removed at the end.
TEST_F(RouteTest, EcmpTest_1) {
    ComponentNHKeyList comp_nh_list;
    const Agent *agent = Agent::GetInstance();
    int remote_server_ip = 0x0A0A0A0A;   // 10.10.10.10, incremented per NH
    int label = AllocLabel("ecmp_test_1");
    int nh_count = 3;
    //Create CNH
    ComponentNHKeyList component_nh_list;
    DBRequest nh_req(DBRequest::DB_ENTRY_ADD_CHANGE);
    nh_req.key.reset(new CompositeNHKey(Composite::LOCAL_ECMP,
                                        false,
                                        component_nh_list,
                                        vrf_name_));
    nh_req.data.reset(new CompositeNHData());
    agent->nexthop_table()->Enqueue(&nh_req);
    client->WaitForIdle();
    //Attach CNH to MPLS
    DBRequest req1;
    req1.oper = DBRequest::DB_ENTRY_ADD_CHANGE;
    MplsLabelKey *key1 = new MplsLabelKey(MplsLabel::VPORT_NH, label);
    req1.key.reset(key1);
    MplsLabelData *data1 = new MplsLabelData(Composite::LOCAL_ECMP, true,
                                             component_nh_list,
                                             vrf_name_);
    req1.data.reset(data1);
    agent->mpls_table()->Enqueue(&req1);
    client->WaitForIdle();
    BgpPeer *peer = CreateBgpPeer("127.0.0.1", "remote");
    client->WaitForIdle();
    MplsLabel *mpls =
        agent->mpls_table()->FindMplsLabel(label);
    DBEntryBase::KeyPtr key_tmp = mpls->nexthop()->GetDBRequestKey();
    NextHopKey *comp_nh_key = static_cast<NextHopKey *>(key_tmp.release());
    // NOTE(review): std::auto_ptr is deprecated (removed in C++17);
    // std::unique_ptr would be preferred if ComponentNHKey's constructor
    // signature allows it — confirm before changing.
    std::auto_ptr<const NextHopKey> nh_key_ptr(comp_nh_key);
    ComponentNHKeyPtr component_nh_key(new ComponentNHKey(label,
                                                          nh_key_ptr));
    comp_nh_list.push_back(component_nh_key);
    // Remaining components are plain remote-server tunnel NHs.
    for(int i = 1; i < nh_count; i++) {
        ComponentNHKeyPtr comp_nh(new ComponentNHKey((label + i),
            Agent::GetInstance()->fabric_vrf_name(),
            Agent::GetInstance()->router_id(),
            Ip4Address(remote_server_ip++),
            false, TunnelType::AllType()));
        comp_nh_list.push_back(comp_nh);
    }
    SecurityGroupList sg_id_list;
    // Stop the scheduler so the label re-point and the ecmp route add are
    // processed back-to-back — this ordering is the bug being exercised.
    TaskScheduler::GetInstance()->Stop();
    //Move label to tunnel enqueue request
    DBRequest req;
    req.oper = DBRequest::DB_ENTRY_ADD_CHANGE;
    MacAddress vm_mac = MacAddress::FromString("00:00:01:01:01:10");
    MplsLabelKey *key = new MplsLabelKey(MplsLabel::VPORT_NH, label);
    req.key.reset(key);
    MplsLabelData *data = new MplsLabelData("vnet1", false,
                                            InterfaceNHFlags::INET4,
                                            vm_mac);
    req.data.reset(data);
    agent->mpls_table()->Enqueue(&req);
    //Now add ecmp tunnel add request
    EcmpTunnelRouteAdd(peer, vrf_name_, remote_vm_ip_, 32,
                       comp_nh_list, false, "test", sg_id_list,
                       PathPreference());
    client->WaitForIdle();
    TaskScheduler::GetInstance()->Start();
    //DeleteRoute(vrf_name_.c_str(), remote_vm_ip_, 32, peer);
    Agent::GetInstance()->fabric_inet4_unicast_table()->
        DeleteReq(peer, vrf_name_.c_str(), remote_vm_ip_, 32,
                  new ControllerVmRoute(peer));
    MplsLabel::DeleteReq(agent, label);
    client->WaitForIdle(5);
    FreeLabel(label);
    // Everything must be cleaned up: route gone, composite NH gone.
    EXPECT_FALSE(RouteFind(vrf_name_, remote_vm_ip_, 32));
    CompositeNHKey comp_key(Composite::ECMP, true, comp_nh_list, vrf_name_);
    EXPECT_FALSE(FindNH(&comp_key));
    DeleteBgpPeer(peer);
    client->WaitForIdle();
}
// A floating IP associated with a local interface produces a local EVPN
// route (L2_RECEIVE NH). When the control node then reflects the same
// route back, the reflected path must replace the local one as active.
TEST_F(RouteTest, fip_evpn_route_local) {
    struct PortInfo input[] = {
        {"vnet1", 1, "1.1.1.10", "00:00:01:01:01:10", 1, 1},
    };
    client->Reset();
    //Creation
    CreateVmportFIpEnv(input, 1);
    client->WaitForIdle();
    //Create floating IP pool
    AddFloatingIpPool("fip-pool1", 1);
    AddFloatingIp("fip1", 1, "2.2.2.10");
    AddLink("floating-ip", "fip1", "floating-ip-pool", "fip-pool1");
    AddLink("floating-ip-pool", "fip-pool1", "virtual-network",
            "default-project:vn1");
    //Associate vnet1 with floating IP
    AddLink("virtual-machine-interface", "vnet1", "floating-ip", "fip1");
    client->WaitForIdle();
    //Add a peer
    BgpPeer *bgp_peer_ptr = CreateBgpPeer(Ip4Address(1), "BGP Peer1");
    boost::shared_ptr<BgpPeer> bgp_peer =
        bgp_peer_ptr->GetAgentXmppChannel()->bgp_peer_id_ref();
    client->WaitForIdle();
    //Search our evpn route
    EvpnRouteEntry *rt = EvpnRouteGet("default-project:vn1:vn1",
                                      MacAddress::FromString(input[0].mac),
                                      Ip4Address::from_string("2.2.2.10"), 0);
    EXPECT_TRUE(rt != NULL);
    AgentPath *path = rt->FindLocalVmPortPath();
    EXPECT_TRUE(path != NULL);
    EXPECT_TRUE(rt->GetActivePath() == path);
    EXPECT_TRUE(rt->GetActiveNextHop()->GetType() == NextHop::L2_RECEIVE);
    //Reflect CN route and see if its added.
    stringstream ss_node;
    autogen::EnetItemType item;
    SecurityGroupList sg;
    item.entry.nlri.af = BgpAf::L2Vpn;
    item.entry.nlri.safi = BgpAf::Enet;
    item.entry.nlri.address="2.2.2.10/32";
    item.entry.nlri.ethernet_tag = 0;
    autogen::EnetNextHopType nh;
    nh.af = Address::INET;
    nh.address = agent_->router_ip_ptr()->to_string();
    nh.label = rt->GetActiveLabel();
    item.entry.next_hops.next_hop.push_back(nh);
    item.entry.med = 0;
    bgp_peer_ptr->GetAgentXmppChannel()->AddEvpnRoute("default-project:vn1:vn1",
                                                      "00:00:01:01:01:10",
                                                      &item);
    client->WaitForIdle();
    // The reflected (controller) path must now be active, not the local one.
    EXPECT_TRUE(rt->GetActivePath() != path);
    client->WaitForIdle();
    DeleteVmportFIpEnv(input, 1, true);
    client->WaitForIdle();
    DeleteBgpPeer(bgp_peer.get());
    client->WaitForIdle();
}
// Entry point: runs the whole suite twice — once with MPLS-over-GRE and
// once with MPLS-over-UDP tunnel encapsulation — and accumulates the
// results so a failure in either pass fails the binary.
int main(int argc, char *argv[]) {
    ::testing::InitGoogleTest(&argc, argv);
    GETUSERARGS();
    client = TestInit(init_file, ksync_init, true, false);
    eth_itf = Agent::GetInstance()->fabric_interface_name();
    RouteTest::SetTunnelType(TunnelType::MPLS_GRE);
    int ret = RUN_ALL_TESTS();
    // Second pass with a different encap; non-zero from either pass is kept.
    RouteTest::SetTunnelType(TunnelType::MPLS_UDP);
    ret += RUN_ALL_TESTS();
    client->WaitForIdle();
    TestShutdown();
    delete client;
    return ret;
}
| {
"alphanum_fraction": 0.6138558631,
"author": null,
"avg_line_length": 38.4147058824,
"converted": null,
"ext": "cc",
"file": null,
"hexsha": "9c5b2b3b5b08110fa5e4fb675a152a3df695224f",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "834302367f3ff81f1ce93f4036b6b3788dfd6994",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "sagarc-contrail/contrail-controller",
"max_forks_repo_path": "src/vnsw/agent/test/test_route.cc",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "834302367f3ff81f1ce93f4036b6b3788dfd6994",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "sagarc-contrail/contrail-controller",
"max_issues_repo_path": "src/vnsw/agent/test/test_route.cc",
"max_line_length": 108,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "834302367f3ff81f1ce93f4036b6b3788dfd6994",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "sagarc-contrail/contrail-controller",
"max_stars_repo_path": "src/vnsw/agent/test/test_route.cc",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 25835,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 91427
} |
Who we are
We are a group of people interested in communal living. We aim to share tools, projects, and ideas with other cooperatives and cooperative-minded folks.
Getting involved
We meet monthly; the exact time and place varies. Join the http://groups.google.com/group/dccnetwork DCCN mailing list here to find out about meeting time/place, and to join the discussion about cooperative communities.
Start Cooperative Housing workshops
DCCN hosts Start Cooperative Housing workshops to help people create cooperative households. The demand for living in cooperative communities is much larger than the supply, so DCCN wants to help seed new cooperative households.
DCCN also contributes to the How to live in a cooperative page. (Add to it if you have suggestions or wisdom of your own!)
Shared Applicant Pool
DCCN maintains a https://spreadsheets.google.com/viewform?formkey=dFZibnZBb3BwSUszaXFIOXRxcjVZdlE6MQ&ifq shared applicant pool for people interested in applying to a cooperative community, so they may be contacted by any cooperative community with openings.
If you live in a cooperative community and want to contact prospective new community members, contact Nick Barry, of N Street Cohousing: (530) 213 3312, or contact him here: http://www.nicholasbarry.com/about.html
Coop and Cohousing Conference
In July 2010 the Davis Cooperative Community Fund awarded DCCN with a grant to support a local coop and cohousing conference. The California Center for Cooperative Development is acting as fiscal sponsor.
History
DCCN was formed in fall 2009 by the members of several cohousing and housing cooperatives in Davis. The vision for the network was born out of ongoing discussions between members of the California Center for Cooperative Development, Dos Pinos Cooperative, the Solar Community Housing Association, Pacifico Student Coop, the Domes & TriCooperatives on the UC campus, and N Street Cohousing in Davis, California. Our goal is to unite local cooperative housing organizations to strengthen our community through the sharing of ideas, program models, and material resources. Our first event was held on Sunday, April 18th at the common house of N Street Cohousing.
| {
"alphanum_fraction": 0.8196721311,
"author": null,
"avg_line_length": 84.4615384615,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "33a0878770c2d4a77a0a1a1c70be119ba4c59765",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "voflo/Search",
"max_forks_repo_path": "lab/davisWiki/Davis_Cooperative_Community_Network.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "voflo/Search",
"max_issues_repo_path": "lab/davisWiki/Davis_Cooperative_Community_Network.f",
"max_line_length": 660,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "voflo/Search",
"max_stars_repo_path": "lab/davisWiki/Davis_Cooperative_Community_Network.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 468,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2196
} |
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import os
import seaborn as sns
import pandas as pd
import glob
from sklearn.metrics.pairwise import cosine_distances, cosine_similarity
from itertools import combinations_with_replacement
from skimage.transform import resize
from matplotlib import offsetbox
from src.modules.utils import *
def show_grid(imgs):
    """Display a list of images on a roughly square grid of subplots."""
    count = len(imgs)
    n_rows = int(np.ceil(np.sqrt(count)))
    n_cols = int(np.ceil(1.0 * count / n_rows))
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(16, 12))
    axes = np.array(axes).flatten()
    # zip stops at the shorter sequence, so trailing axes are left empty.
    for axis, img in zip(axes, imgs):
        axis.imshow(img)
    fig.tight_layout()
def show_random_sample(root, n_images=25):
    """Pick ``n_images`` distinct images from directory ``root`` and show them."""
    paths = np.array([os.path.join(root, name) for name in os.listdir(root)])
    # Sample indices without replacement so no image appears twice.
    chosen = np.random.choice(len(paths), n_images, replace=False)
    imgs = [io.imread(path) for path in paths[chosen]]
    show_grid(imgs)
def find_similary_from_dataset(img_path, dataset, get_features_f, k=10, similarity=cosine_similarity):
    """Return the ``k`` dataset images most similar to the image at ``img_path``.

    ``get_features_f`` maps an image path to a feature vector; ``similarity``
    scores that vector against every row of ``dataset.features``.
    """
    query = get_features_f(img_path)
    scores = similarity([query], dataset.features)[0]
    # Indices of the k highest scores, best match first.
    top_k = np.argsort(scores)[-k:][::-1]
    return dataset.imgs[top_k]
def plot_closest_results(img_path, get_features_f, dataset, k=12):
    """Show the query image followed by its ``k`` nearest dataset images."""
    matches = find_similary_from_dataset(img_path, dataset, get_features_f, k)
    plt.figure(figsize=(20, 15))
    n_rows = int(np.ceil(np.sqrt(k)))
    n_cols = int(np.ceil(k / n_rows))
    # First figure: the query image on its own.
    query_img = io.imread(img_path)
    plt.figure(figsize=(16, 8))
    plt.imshow(query_img)
    plt.title("Query image - %s" % img_path)
    plt.show()
    # Second figure: grid of the closest matches.
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(16,12))
    axes = np.array(axes).flatten()
    for axis, match in zip(axes, matches):
        axis.imshow(match)
    fig.tight_layout()
    plt.show()
def plot_embedding(X, y, images, title=None, figsize=(20,20), img_size=(65 ,65)):
    """Scatter-plot a 2-D embedding ``X`` colored by label ``y``, overlaying
    image thumbnails at well-separated points.

    X: coordinates, one row per sample -- assumes 2 columns; TODO confirm.
    y: integer labels, used to pick one color per distinct label.
    images: one image per row of X; a subset is drawn as thumbnails.
    """
    # Rescale coordinates into the unit square so the spacing threshold
    # below is comparable regardless of the embedding's original scale.
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)
    plt.figure(figsize=figsize)
    ax = plt.subplot(111)
    colormap = plt.cm.gist_ncar
    # One distinct colormap sample per unique label value.
    colors = [colormap(i) for i in np.linspace(0, 1,len(set(y)))]
    for i in range(X.shape[0]):
        plt.scatter(X[i, 0], X[i, 1], color=colors[y[i]])
    if hasattr(offsetbox, 'AnnotationBbox'):
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(images.shape[0]):
            # Squared distance from this point to every thumbnail already shown.
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 6e-3:
                ## don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            img = resize(images[i], img_size)
            imagebox = offsetbox.AnnotationBbox(offsetbox.OffsetImage(img, cmap=plt.cm.gray_r), X[i])
            ax.add_artist(imagebox)
    # Hide both tick sets; the embedding coordinates carry no units.
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)
def pca_summary(pca, standardised_data, out=True):
    """Build a per-component summary table for a fitted PCA.

    Columns: component standard deviation ("sdev"), explained-variance
    proportion ("varproportion") and its running total
    ("cumultiveproportion").  ``out`` is unused but kept for compatibility.
    """
    ratios = pca.explained_variance_ratio_
    names = [str(i) for i in range(1, len(ratios) + 1)]
    sdevs = list(np.std(pca.transform(standardised_data), axis=0))
    proportions = list(ratios)
    cumulative = [np.sum(ratios[:i]) for i in range(1, len(ratios) + 1)]
    columns = pd.Index(["sdev", "varproportion", "cumultiveproportion"])
    return pd.DataFrame(list(zip(sdevs, proportions, cumulative)), index=names, columns=columns)
def screeplot(pca, standardised_values):
    """Plot per-component variance (scree plot) for up to the first 50 components."""
    variances = np.std(pca.transform(standardised_values), axis=0) ** 2
    component_ids = np.arange(len(variances)) + 1
    # Keep at most the first 50 components; later slicing in the original
    # ([:500]) was a no-op after this cut.
    component_ids = component_ids[:50]
    variances = variances[:50]
    print(component_ids.shape, variances.shape)
    plt.plot(component_ids, variances, "o-")
    plt.xticks(component_ids, ["Comp." + str(i) for i in component_ids], rotation=60)
    plt.ylabel("Variance")
    plt.show()
def get_features(dir_path, dataset, features):
    """Look up the feature rows for every file found directly under ``dir_path``.

    File basenames are mapped to row indices via ``dataset.path_to_id``.
    """
    indices = [dataset.path_to_id[os.path.basename(path)]
               for path in glob.glob('%s/*' % dir_path)]
    return features[indices]
def compute_metrics(root_path_of_clusters, features, metrics, verbose=True):
    """Average a pairwise ``metrics`` function within and between cluster dirs.

    Each subdirectory of ``root_path_of_clusters`` is one cluster; directory
    basenames must be integers so pairs can be ordered.  Returns the pair
    (mean metric within clusters, mean metric between distinct clusters).
    """
    # If the image name is 0001, then index 0 of sorted_index holds the
    # image's index into dataset.paths and dataset.imgs.
    clusters = sorted(glob.glob('%s/*' % root_path_of_clusters))
    diag = []  # per-cluster means: cluster paired with itself
    upper_triang = []  # means for each distinct (d1 < d2) cluster pair
    for d1, d2 in combinations_with_replacement(clusters, 2):
        # NOTE(review): get_features is called here with two arguments, but
        # the get_features defined in this module takes three (dir_path,
        # dataset, features) -- confirm which helper is intended.
        out = metrics(get_features(d1, features), get_features(d2, features))
        if verbose:
            print("Cluster %s-%s" % (os.path.basename(d1), os.path.basename(d2)))
            print("\t min: %f" % out.min())
            print("\t max: %f" % out.max())
            print("\t mean: %f" % out.mean())
        # Route the pair's mean by comparing integer directory names.
        if int(os.path.basename(d1)) == int(os.path.basename(d2)):
            diag.append(out.mean())
        elif int(os.path.basename(d1)) < int(os.path.basename(d2)):
            upper_triang.append(out.mean())
    diag_mean = np.array(diag).mean()
    upper_triag_mean = np.array(upper_triang).mean()
    if verbose:
        print("Mean of metrics within clusters: %f" % diag_mean)
        print("Mean of metrics between clusters: %f" % upper_triag_mean)
return diag_mean, upper_triag_mean | {
"alphanum_fraction": 0.6434951456,
"author": null,
"avg_line_length": 35.5172413793,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "077f3438296d1a9c3d1eeb9f4c1bd4584b850f73",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f8466ef44270edc08a93498d12668b4e5f0a56ec",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mratkovic/mozgalo2017",
"max_forks_repo_path": "src/modules/dataset_utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f8466ef44270edc08a93498d12668b4e5f0a56ec",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mratkovic/mozgalo2017",
"max_issues_repo_path": "src/modules/dataset_utils.py",
"max_line_length": 112,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f8466ef44270edc08a93498d12668b4e5f0a56ec",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mratkovic/mozgalo2017",
"max_stars_repo_path": "src/modules/dataset_utils.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1401,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5150
} |
[STATEMENT]
lemma is_interval_translation[simp]:
"is_interval ((+) x ` X) = is_interval X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_interval ((+) x ` X) = is_interval X
[PROOF STEP]
using is_interval_neg_translationI[of "(+) x ` X" x]
[PROOF STATE]
proof (prove)
using this:
is_interval ((+) x ` X) \<Longrightarrow> is_interval ((-) x ` (+) x ` X)
goal (1 subgoal):
1. is_interval ((+) x ` X) = is_interval X
[PROOF STEP]
by (auto intro!: is_interval_translationI simp: image_image) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 197,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
\subsection{Episode 23: Illegal Fake Goblin Ham Meat Ring}
\DndDropCapLine{A}s we join the action, our “heroes” are busy busying themselves with busy work in Hope’s Rest, getting one last week of city life in before they’re stuck on a boat for an unknown amount of time.\medskip
Kolo is doing crime, Gary is fighting it. Together, they’ll keep each other in business forever, because also Gary keeps relapsing into his new drug addiction. For some reason, every time he falls asleep with his mouth open, he seems to get more addicted to Kolo’s drugs.\medskip
Esme presents Gary with his special new shoes. They light up and scoot along and they are the best shoes ever.\medskip
Esme then gives a pep-talk to her lab staff. It is not particularly peppy, but at least it is talky.\medskip
Derek Bobacious, everyone’s favourite old person, because who else would everyone’s favourite old person be, Archicunt? What a ridiculous concept…. arrives to look after the bank while Rocky’s looking after the stone thing. Derek’s here now.\medskip
The crew then rides off towards Port Averdale. Nothing happens. Nothing at all. All the way there. What is this shit?\medskip
They arrive in Port Averdale, bored as fuck.\medskip
It’s pretty fancy.\medskip
It is decided that in this town they should be a troop of travelling entertainers, for very little reason. Gary is a magician called The Pope, with Fishy as his glamorous assistant, and Kolo dressed in a rabbit suit, that Pilch would never have got on him if he wasn’t so high all the time.\medskip
Esme is a Gabrin banker called Hardrip.\medskip
Port Averdale is a cool place with foreign foods.\medskip
Gary’s on the lookout for a theatre. He finds an open-air theatre called The Triangle, and asks a local why it has 4 sides. Apparently it’s 2 triangles put together. Gary’s mind is\medskip
B L O W N\medskip
Hardrip tries to buy The Triangle with all that money he has.\medskip
They head towards (but not in) the sea, to find a boat.\medskip
Kolo hears that there’s two options, one with two sub-options, first an exploring ship, or, second, a ship with a captain who is mad or a captain who is unscrupulous.\medskip
Fishwank tries to fuck his way onto a boat. It for sure works. He fucks a whole bunch of sailors. Then he leaves the boat again, having achieved nothing but sex. And also money. Because of course.\medskip
Kolo goes off to investigate the potential of boats to Masuda. Gary goes off to a pub called Rum and Ham or some shit like that, where the unscrupulous captains go.\medskip
Gary orders a Rum Ham and a Ham Rum. He eats one and drinks the other, then asks the whole bar who the most unscrupulous captain is. No one puts their hands up, but one guy says he can tell him who is. Then, Gary asks them to say the name of the most unscrupulous captain they know. They all talk at the same time and he has no idea what they said.\medskip
Fishwank tries to fuck the barman but then gets bored when he thinks he’s poor.\medskip
Hardrip tries to invest in the bar, and ends up pimping out Fishy.\medskip
Gary talks to the guy who wanted to talk to him. Apparently Captain Someone will do anything for money, just like Fishy. Kolo ribbits, because he’s a boy frog rabbit and also because he is still SO HIGH.\medskip
Captain Someone has had a box seized by the Port Authority, who are apparently dicks, so it’s fine to steal from them. Apparently if we get it back, he might want to let us in his boat.\medskip
The gang go to visit Captain Someone on his boat, who is a bird.\medskip
He’s a bird man, with pretty colourful feathers and a nice hat. He’s pretty great.\medskip
Hardrip asks if the bird likes riddles. He does not. Neither does Hardrip.\medskip
Kolo strokes the feathery man.\medskip
They agree to steal the box and pay him 500gp and then he’ll take them all to the place.\medskip
Gary makes the feathery man agree that they’re going to leave at dusk tomorrow.\medskip
They all leave to go and do all the things, and say they’ll be back with the box, which is apparently full of spices.\medskip
Once outside, Gary announces a spectacular magic show magically spectacle spectacular. Everyone gets super hyped. By jove, this is going to be the best fucking magic show there’s ever been in this fucking town. Look at that rabbit. And that beautiful woman. The magic show is going to be at sundown tomorrow at The Triangle.\medskip
This is when they’ll go on the boat, and everyone will think they’ll be doing a magic show on the other side of town. He makes sure that all the authorities and church people hear him.\medskip
When everyone realises that they don’t actually get to do a magic show, they get a bit sad.\medskip
The plan is finalised:\medskip
1) A box labelled “HAM” will be stolen from Ham and Rum Kolo will feign death within the box\medskip
2) Gary, in his Police capacity as well as his Magician capacity, will report to the Authorities that he saw some people moving boxes that said “HAM” but sounded like Gabrins\medskip
3) The Authorities will investigate the crates and find one living Gabrin and a dead Gabrin (because Kolo has a ring that makes him look dead)\medskip
4) They’ll impound the box, because there’s clearly some weird fake ham ring going on\medskip
5) Kolo will break out and steal the box\medskip
Points 1 to 4 go off without a hitch, the Authorities open the box and find not ham, but a live Gabrin and a dead Gabrin dressed as a rabbit, and Kolo gets taken off into the impound.\medskip
Esme and Gary go off to Rum and Ham for a nice night, because they don’t hang out enough. Esme recommends him some new foods to try.\medskip
Fishwank goes to keep an eye of the impound warehouse place.\medskip
Kolo is in the box, still dressed as a rabbit. He maybe hears something else in the box house. Maybe not, though.\medskip
After a few hours of being dead, he cuts himself out of the box.\medskip
He goes snooping in the boxes and finds lots of hollowed-out vegetables. Some have been used to smuggle drugs. Some have… not… been used for that.\medskip
They’re fuckfruits.\medskip
Fruits you can fuck.\medskip
Kolo does not find a big box full of spice, which is what we want.\medskip
Kolo tries to look under a door and does it so badly that his eye bleeds. Then he opens the door instead.\medskip
A whole shit-tonne of guards are playing cards on the other side. They are not “Playing Cards”.\medskip
Kolo quietly empties a box, cuts eyeholes in it, and shuffles along the ground. It is great. Nothing could go better.\medskip
Kolo realises that the best solution to this problem is to set fire to all the boxes.\medskip
It is not currently known if the important box is on fire.\medskip
Let’s face it, it probably is.\medskip
Kolo is still in a box, shuffling around this burning room.\medskip
The two nastiest guards run out to get some water from somewhere.\medskip
Kolo NOW starts checking boxes. He finds the important box. It’s quite near the fire.\medskip
NEVER FEAR! Fishwank is here, and he’s the local fire marshal. The guards, unreasonably, expect him to be able to put out the fire.\medskip
Clearly, this small fire means that this whole building is a write off and they should take all the flammable things, including spice, out of the building before it causes more damage.\medskip
Somehow, all of this works out pretty much fine.\medskip
The other guards turn up, along with some for-real firemen who know how to put out fires for some reason. They don’t stop him leaving, because they think they can put out the fire, for some reason.\medskip
The guards save the box of spice, on Fishwank’s command.\medskip
Fishwank liberates the barrel containing Kolo.\medskip
Suddenly, one of the guards carrying the spicebox falls asleep. It definitely has to do with the invisible smoke, and not the blowdart that someone who couldn’t be Kolo, because Kolo’s dead, definitely didn’t fire at him.\medskip
The rest of the guards help Pilch get the carts to the ports where they’ll be safe from the invisible fire.\medskip
Again, everything goes fine. It’s weird.\medskip
The awesome alibi magic show isn’t even needed.\medskip
LATER, AFTER CELEBRATIONS:\medskip
The gang are going to the ship, with the important box, a box of Rum Ham, and a barrel of Ham Rum.\medskip
Gary’s looking at the sky, feeling happy, so he definitely spots that they’re being followed by a load of shadowy guys and a floating guy who looks like that scary guy they met underground that time.\medskip
They stand about, discussing how that’s probably not great, when all of a sudden, Gary stops working. Some dicks start lurking out of the alleyway at them. Kolo puts some shit in Gary’s mouth, but he still can’t move.\medskip
Fishy’s Woolfe puts himself under Gary, and everyone except Esme, starts running (or being run, in Gary’s case) away.\medskip
Esme is too slow, falls behind the rest and gets flung up in the air by some crazy force.\medskip
Gary regains control of his legs, jumps off the wolf, and just absolutely pegs it like some weirdass bull, using the combined powers of sprinting, drugs and heelies, he scoops up and picks up Esme as weird armoured… people....? run towards her. Kolo runs only slightly slower than him and fires a shot off.\medskip
They run off towards the boat, and get Captain Something to drive the boat away as fast as he can, because the Inquisition have taken an interest in his spice, if you know what I mean.\medskip
Fishwank fires a ballista at them. It misses.\medskip
Some of them fall over on some ballbearings.\medskip
Everyone gets on the boat, and the armoured guys are left on the shore like dickheads.\medskip
The boat’s sailing away, but whut’s all this? It’s not moving.\medskip
AHHH THE GUY’S DOING SOMETHING FUCKED UP!!!!!!\medskip
The boat’s stopped. Everyone’s going to die.\medskip
But what’s this?\medskip
Kolo makes a small movement to himself, not looking like he expects much. All of a sudden, a light appears behind his eyes. The boat jerks forward, gets pulled back slightly, and then…. BAM! It flies forwards, free from the magical force that was holding it in place.\medskip
Kolo drops unconscious and the group sail off into the silent star-filled night, towards the mysterious continent, from whose shore none return. | {
"alphanum_fraction": 0.7862781405,
"author": null,
"avg_line_length": 120.7176470588,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "3e6911cb026090e85f814dbe04f70426d78a846c",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-10-04T09:40:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-10-04T09:40:24.000Z",
"max_forks_repo_head_hexsha": "23763424cf31c50618bc6ddeefe2196cdf6be974",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mcgi5sr2/velterraBook",
"max_forks_repo_path": "content/eps/23.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "23763424cf31c50618bc6ddeefe2196cdf6be974",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mcgi5sr2/velterraBook",
"max_issues_repo_path": "content/eps/23.tex",
"max_line_length": 356,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "23763424cf31c50618bc6ddeefe2196cdf6be974",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mcgi5sr2/velterraBook",
"max_stars_repo_path": "content/eps/23.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2543,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 10261
} |
import cv2
import numpy as np
import tensorflow as tf
OUTPUT_PATH = "../events/"
def with_update_op():
    """Train a conv + batch-norm graph while running the UPDATE_OPS collection.

    Wrapping the forward pass in a control dependency on
    tf.GraphKeys.UPDATE_OPS forces the batch-norm moving mean/variance update
    ops to run on every training step; the final print shows the resulting
    moving statistics and an inference-mode activation patch.
    """
    input_node = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32, name='input_node')
    # Defaults to training mode; fed False below for the evaluation run.
    training_node = tf.placeholder_with_default(True, (), name='training')
    net = tf.layers.conv2d(input_node, 32, (3, 3), strides=(2, 2), padding='same', name='conv_1')
    net = tf.layers.batch_normalization(net, training=training_node, name='bn')
    # Grab the moving-statistic tensors so they can be inspected after training.
    moving_mean = tf.get_default_graph().get_tensor_by_name(
        "bn/moving_mean/read:0")
    moving_var = tf.get_default_graph().get_tensor_by_name(
        "bn/moving_variance/read:0")
    update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    print(f'update_op: {update_op}')
    # The control dependency is what actually triggers the moving-stat updates
    # each time train_op is run.
    with tf.control_dependencies(update_op):
        train_op = tf.identity(net, name='train_op')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        image = cv2.imread('../05/ithome.jpg')
        image = np.expand_dims(image, 0)
        # 100 training steps on the same image, each running the update ops.
        for _ in range(100):
            sess.run(train_op, feed_dict={input_node: image})
        # Evaluate with training=False so batch norm uses the moving stats.
        result, mm, mv = sess.run([net, moving_mean, moving_var], feed_dict={input_node: image, training_node: False})
        print(f'with_update_op:\n(mm , mv) : ({mm[0]:.2f} , {mv[0]:.2f})\n{result[0, 22:28, 22:28, 0]}')
    # Dump the graph for TensorBoard inspection.
    tf.summary.FileWriter(OUTPUT_PATH, graph=tf.get_default_graph())
def without_update_op():
    """Same conv + batch-norm graph, but without running UPDATE_OPS.

    train_op carries no control dependency on tf.GraphKeys.UPDATE_OPS, so the
    batch-norm moving mean/variance update ops never run during training; the
    final print shows the stale statistics for comparison with
    with_update_op().
    """
    input_node = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32, name='input_node')
    training_node = tf.placeholder_with_default(True, (), name='training')
    net = tf.layers.conv2d(input_node, 32, (3, 3), strides=(2, 2), padding='same', name='conv_1')
    net = tf.layers.batch_normalization(net, training=training_node, name='bn')
    moving_mean = tf.get_default_graph().get_tensor_by_name(
        "bn/moving_mean/read:0")
    moving_var = tf.get_default_graph().get_tensor_by_name(
        "bn/moving_variance/read:0")
    # No control dependency here -- the moving statistics are never updated.
    train_op = tf.identity(net, name='train_op')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        image = cv2.imread('../05/ithome.jpg')
        image = np.expand_dims(image, 0)
        # NOTE(review): 10 steps here vs. 100 in with_update_op -- presumably
        # fine since the stats are not updated either way; confirm intent.
        for _ in range(10):
            sess.run(train_op, feed_dict={input_node: image})
        result, mm, mv = sess.run([net, moving_mean, moving_var], feed_dict={input_node: image, training_node: False})
        print(f'without_update_op:\n(mm , mv) : ({mm[0]:.2f} , {mv[0]:.2f})\n{result[0, 22:28, 22:28, 0]}')
if __name__ == '__main__':
    with_update_op()
    # Start from a fresh default graph so the two demos don't collide on
    # variable/op names.
    tf.reset_default_graph()
    without_update_op()
| {
"alphanum_fraction": 0.6650735294,
"author": null,
"avg_line_length": 36.7567567568,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e983dd2438d53f490250a5e9826013f7c791ce12",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2022-03-15T08:52:26.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-10-06T17:11:25.000Z",
"max_forks_repo_head_hexsha": "e8f92de2a73a88e7b03a9ac58ece4c4a604f066e",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jason9075/ithome_tensorflow_series",
"max_forks_repo_path": "17/update_op.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "e8f92de2a73a88e7b03a9ac58ece4c4a604f066e",
"max_issues_repo_issues_event_max_datetime": "2020-07-08T06:55:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-03T10:13:10.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "jason9075/ithome_tensorflow_series",
"max_issues_repo_path": "17/update_op.py",
"max_line_length": 118,
"max_stars_count": 24,
"max_stars_repo_head_hexsha": "e8f92de2a73a88e7b03a9ac58ece4c4a604f066e",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jason9075/ithome_tensorflow_series",
"max_stars_repo_path": "17/update_op.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-15T08:52:22.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-10-06T17:11:23.000Z",
"num_tokens": 735,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2720
} |
# Let's do fancy stats, with some help from scipy
from scipy.stats import distributions
from numpy import sqrt
from numpy import corrcoef
def ttest_greaterthan(mean1,stdev1,n1,mean2,stdev2,n2):
    """Gives the p-value (1.0 - confidence) that the true mean1 is greater than the true mean2.

    One-tailed two-sample t-test assuming normal distributions with equal
    variance.  mean1/mean2 and stdev1/stdev2 are the observed statistics;
    n1/n2 are the number of points sampled from each distribution.
    """
    if min(n1, n2) < 2:
        raise Exception("Both distributions need to have been sampled at least twice (preferably > 30 times) to apply a t-test.")
    df = n1 + n2 - 2  # degrees of freedom
    mean_diff = mean1 - mean2
    # Pooled standard error of the difference between the two means.
    pooled_se = sqrt(((n1 - 1) * stdev1 * stdev1 + (n2 - 1) * stdev2 * stdev2)
                     / float(df) * (1.0 / n1 + 1.0 / n2))
    t = 0 if mean_diff == 0 else mean_diff / pooled_se
    # Survival function of Student's t gives the one-sided p-value.
    return distributions.t.sf(t, df)
def ttest_notequal(mean1,stdev1,n1,mean2,stdev2,n2):
    """Gives the p-value (1.0 - confidence) that mean1 is statistically different from mean2.

    Two-tailed t-test assuming normal distributions with equal variance:
    doubles the smaller tail of the one-sided test.
    """
    one_sided = ttest_greaterthan(mean1,stdev1,n1,mean2,stdev2,n2)
    tail = min(one_sided, 1.0 - one_sided)
    return 2 * tail
def gini(dd):
    """Gini inequality coefficient.

    Ranges between 0.0 (completely equally distributed) and
    1.0 (all wealth/value concentrated at a single individual).
    Requires at least two values; raises ZeroDivisionError otherwise.
    """
    dd = sorted(dd, reverse=True)
    N = len(dd)
    # Fix: the original called mean(dd), but `mean` is never imported in this
    # module (only sqrt/corrcoef come from numpy), so it raised NameError.
    avg = sum(dd) / float(N)
    # Rank-weighted sum over the descending-sorted values.
    sum1 = sum(dd[i] * (i + 1) for i in range(N))
    return float(N + 1) / float(N - 1) - 2.0 / (N * (N - 1) * avg) * sum1
def linearRegressionR2Val(xs,ys):
    """Returns the R^2 value for a linear regression (squared Pearson correlation)."""
    r = corrcoef(xs, ys)[0, 1]
    return r * r
| {
"alphanum_fraction": 0.7023050515,
"author": null,
"avg_line_length": 39.9803921569,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "46b664ca3cfd67dd8b878d8461101cb5f956cc6c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2753d99da2dc840066490208da70c8bef8a88f49",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "hazim79/behaviorsearch",
"max_forks_repo_path": "scripts/fancystats.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2753d99da2dc840066490208da70c8bef8a88f49",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "hazim79/behaviorsearch",
"max_issues_repo_path": "scripts/fancystats.py",
"max_line_length": 124,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2753d99da2dc840066490208da70c8bef8a88f49",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "hazim79/behaviorsearch",
"max_stars_repo_path": "scripts/fancystats.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 608,
"path": null,
"reason": "from numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2039
} |
import numpy as np
def Kbeta(D_K, alpha):
    """Combines a stack of kernel matrices into one weighted kernel.

    Args:
        D_K: a single kernel (1-D or 2-D ndarray), or a 3-D ndarray whose
            first axis indexes individual kernels.
        alpha: scalar weight for a single kernel, or a sequence of
            per-kernel weights for the 3-D case.

    Returns:
        The weighted combination sum_i alpha[i] * D_K[i] (or alpha * D_K
        for a single kernel).
    """
    # Single kernel: just scale it by alpha.
    if D_K.ndim in (1, 2):
        return alpha * D_K
    # Accumulate the weighted sum over the kernel axis.
    combined = np.zeros((D_K.shape[1], D_K.shape[2]))
    for idx, weight in enumerate(alpha):
        combined += D_K[idx] * weight
    return combined
"alphanum_fraction": 0.6214953271,
"author": null,
"avg_line_length": 23.7777777778,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "00f95ffa68a9e9393c42c4b5ddfa1d0707bbea2f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0ceb42ea4e766fd1a1bcbb1ee17af369dbc890c9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "5966466/SIMLR-python",
"max_forks_repo_path": "SIMLR/src/Kbeta.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0ceb42ea4e766fd1a1bcbb1ee17af369dbc890c9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "5966466/SIMLR-python",
"max_issues_repo_path": "SIMLR/src/Kbeta.py",
"max_line_length": 47,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "0ceb42ea4e766fd1a1bcbb1ee17af369dbc890c9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "5966466/SIMLR-python",
"max_stars_repo_path": "SIMLR/src/Kbeta.py",
"max_stars_repo_stars_event_max_datetime": "2019-04-17T16:50:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-19T07:20:01.000Z",
"num_tokens": 76,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 214
} |
from __future__ import print_function
# %matplotlib inline
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
# --- Reproducibility -------------------------------------------------------
# Set random seed for reproducibility
manualSeed = 999
# manualSeed = random.randint(1, 10000) # use if you want new results
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
# NOTE(review): numpy's RNG is not seeded here even though numpy is imported;
# seed it too if any numpy randomness is used downstream.
# --- Training hyperparameters ---------------------------------------------
# Root directory for dataset
dataroot = "data/celeba"
# Number of workers for dataloader
workers = 2
# Batch size during training
batch_size = 128
# Spatial size of training images. All images will be resized to this
# size using a transformer.
image_size = 64
# Number of channels in the training images. For color images this is 3
nc = 3
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 64
# Size of feature maps in discriminator
ndf = 64
# Number of training epochs
num_epochs = 5
# Learning rate for optimizers
lr = 0.0002
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# Number of GPUs available. Use 0 for CPU mode.
ngpu = 1
| {
"alphanum_fraction": 0.7783286119,
"author": null,
"avg_line_length": 22.0625,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8b7fbbfdf498ab76614c6261ae6aa08fb79dccbd",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-02-07T12:39:46.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-07T12:39:46.000Z",
"max_forks_repo_head_hexsha": "1de5a06d37be392227f94fe3576b16ba3fe7a9a2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pppnnn/PyTorch-Tutorials",
"max_forks_repo_path": "tutorials/03-advanced/01-adversarial-network/main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1de5a06d37be392227f94fe3576b16ba3fe7a9a2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pppnnn/PyTorch-Tutorials",
"max_issues_repo_path": "tutorials/03-advanced/01-adversarial-network/main.py",
"max_line_length": 71,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1de5a06d37be392227f94fe3576b16ba3fe7a9a2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pppnnn/PyTorch-Tutorials",
"max_stars_repo_path": "tutorials/03-advanced/01-adversarial-network/main.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 363,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1412
} |
import numpy as np
from ..common.utils import logger, normalize
from .aad_globals import LODA
from .aad_base import Aad
from ..loda.loda import loda, get_all_hist_pdfs
from .data_stream import StreamingSupport
class AadLoda(Aad, StreamingSupport):
    """Adapter that exposes a LODA ensemble through the Aad interface.

    Attributes:
        sparsity: float
        mink: int
        maxk: int
        loda_model: LodaResult
            The LODA model containing all projection vectors and histograms
    """

    def __init__(self, sparsity=np.nan, mink=1, maxk=0, random_state=None):
        Aad.__init__(self, LODA, random_state=random_state)
        self.sparsity = sparsity
        self.mink = mink
        self.maxk = maxk
        self.loda_model = None
        self.m = None

    def get_num_members(self):
        """Returns the number of ensemble members (one per LODA projection)."""
        return self.m

    def fit(self, x):
        """Trains the LODA model on x and initializes uniform weights."""
        self.loda_model = loda(x, self.sparsity, mink=self.mink, maxk=self.maxk)
        # One member per projection vector (columns of the w matrix).
        self.m = self.loda_model.pvh.pvh.w.shape[1]
        self.w = normalize(np.ones(self.m, dtype=float))
        logger.debug("LODA m: %d" % self.m)

    def transform_to_ensemble_features(self, x, dense=False, norm_unit=False):
        """Scores x against every projection; features are negative log-densities."""
        pdfs = get_all_hist_pdfs(x, self.loda_model.pvh.pvh.w, self.loda_model.pvh.pvh.hists)
        features = -np.log(pdfs)
        if norm_unit:
            # Scale each row to unit Euclidean norm.
            row_norms = np.sqrt(np.power(features, 2).sum(axis=1))
            features = (features.T * 1 / row_norms).T
        return features

    def supports_streaming(self):
        return False

    def add_samples(self, X, current=False):
        logger.warning("Model does not support stream update. Retaining old model.")

    def update_model_from_stream_buffer(self):
        # The underlying LODA implementation has no incremental update.
        pass
| {
"alphanum_fraction": 0.6284313725,
"author": null,
"avg_line_length": 33.4426229508,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b80b8b881aa9dbcc03c729390a80a47ffc6dc6f3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 197,
"max_forks_repo_forks_event_max_datetime": "2022-03-22T21:37:03.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-05-08T04:16:49.000Z",
"max_forks_repo_head_hexsha": "78b01e9c9502523c5341243e1a8dca6befcefbc3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "matwey/ad_examples",
"max_forks_repo_path": "ad_examples/aad/loda_aad.py",
"max_issues_count": 11,
"max_issues_repo_head_hexsha": "78b01e9c9502523c5341243e1a8dca6befcefbc3",
"max_issues_repo_issues_event_max_datetime": "2022-02-10T00:02:29.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-12-25T00:46:25.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "matwey/ad_examples",
"max_issues_repo_path": "ad_examples/aad/loda_aad.py",
"max_line_length": 102,
"max_stars_count": 773,
"max_stars_repo_head_hexsha": "78b01e9c9502523c5341243e1a8dca6befcefbc3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "matwey/ad_examples",
"max_stars_repo_path": "ad_examples/aad/loda_aad.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-28T01:50:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-10T04:08:47.000Z",
"num_tokens": 541,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2040
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import time
import datetime
from util import write_to_table_with_partition
import numpy as np
import faiss
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, StructField, StructType, FloatType, IntegerType, ArrayType, MapType
from util import resolve_placeholder, write_to_table_with_partition
'''
This process generates the top-n-similarity table.
spark-submit --master yarn --num-executors 20 --executor-cores 5 --executor-memory 16G --driver-memory 16G --conf spark.driver.maxResultSize=5g --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict top_n_similarity.py config.yml
spark-submit --executor-memory 16G --driver-memory 16G --conf spark.driver.maxResultSize=5g --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict top_n_similarity.py config.yml
The top-n-similarity table is
|user| top-N-similarity|top-n-users
|:-------------| :------------: |
|user-1-did| [similarity-score-11, similarity-score-12, similarity-score-13] |[user-did-1, user-did-2, user-did-3]|
|user-2-did| [similarity-score-21, similarity-score-22, similarity-score-23] |[user-did-10, user-did-20, user-did-30]|
|user-3-did| [similarity-score-31, similarity-score-32, similarity-score-33] |[user-did-23, user-did-87, user-did-45]|
'''
def load_score_vectors(spark_session, score_vector_table,
                       bucket, bucket_step, bucket_size):
    """Loads dids, score vectors and bucket ids for a range of did buckets.

    Queries `score_vector_table` for buckets [bucket, bucket+bucket_step-1],
    clamped to `bucket_size`, and returns three aligned numpy arrays:
    (dids, score_vectors, buckets).
    """
    last_bucket = min(bucket + bucket_step - 1, bucket_size)
    command = "SELECT did, did_bucket, score_vector FROM {} WHERE did_bucket BETWEEN {} AND {}".format(
        score_vector_table, bucket, last_bucket)
    df = spark_session.sql(command)
    # Collect the rows together so that the did, score vector and bucket of
    # each row stay aligned when split into separate arrays.
    rows = df.select('did', 'score_vector', 'did_bucket').collect()
    dids = np.array([row['did'] for row in rows])
    score_vectors = np.array([row['score_vector'] for row in rows], dtype='f4')
    buckets = np.array([row['did_bucket'] for row in rows])
    return (dids, score_vectors, buckets)
def run(spark_session, cfg):
    """Builds a FAISS GPU index over all user score vectors, then searches it
    bucket by bucket for each user's top-N most similar users and writes the
    results to the Hive similarity table.

    Args:
        spark_session: active SparkSession with Hive support enabled.
        cfg: parsed YAML configuration; reads the 'score_vector' and
            'top_n_similarity' sections for table names and tuning knobs.
    """
    score_vector_table = cfg['score_vector']['score_vector_table']
    similarity_table = cfg['top_n_similarity']['similarity_table']
    top_n_value = cfg['top_n_similarity']['top_n']
    did_bucket_size = cfg['top_n_similarity']['did_bucket_size']
    load_bucket_step = cfg['top_n_similarity']['load_bucket_step']
    search_bucket_step = cfg['top_n_similarity']['search_bucket_step']
    index_factory_string = cfg['top_n_similarity']['index_factory_string']
    # If the number of GPUs is 0, uninstall faiss-cpu.
    # At least one GPU is required; the pipeline aborts otherwise.
    num_gpus = faiss.get_num_gpus()
    assert num_gpus != 0
    print('Number of GPUs available: {}'.format(num_gpus))
    start_time = time.time()
    # Load the score vectors into the index.
    # Phase 1: pull every bucket of score vectors out of Hive and add them to
    # a single GPU-replicated FAISS index, remembering the dids in insertion
    # order so search result indexes can be mapped back to dids.
    did_list = []
    for did_bucket in range(0, did_bucket_size, load_bucket_step):
        print('Loading buckets {} - {} of {}'.format(did_bucket, did_bucket+load_bucket_step-1, did_bucket_size))
        (dids, score_vectors, _) = load_score_vectors(spark_session, score_vector_table,
                                                      did_bucket, load_bucket_step, did_bucket_size)
        # Keep track of the dids.
        if did_bucket == 0:
            did_list = dids
        else:
            did_list = np.concatenate((did_list, dids))
        # Create the FAISS index on the first iteration.
        # (Dimensionality comes from the first batch of vectors.)
        if did_bucket == 0:
            cpu_index = faiss.index_factory(score_vectors.shape[1], index_factory_string)
            gpu_index = faiss.index_cpu_to_all_gpus(cpu_index)
        # we need to check if train is necessary, now it is disabled.
        if not gpu_index.is_trained:
            gpu_index.train(score_vectors)
        # Add the vectors to the index.
        gpu_index.add(score_vectors)
    load_time = time.time()
    # Find the top N by bucket step.
    # Phase 2: re-read the vectors bucket by bucket, query the index for the
    # top-N neighbors of each, and write each batch of results to Hive.
    start_load = time.time()
    mode = 'overwrite'
    total_search_time = 0
    total_load_time = 0
    total_format_time = 0
    total_write_time = 0
    for did_bucket in range(0, did_bucket_size, search_bucket_step):
        # Search for the top N similar users for bucket.
        (dids, score_vectors, buckets) = load_score_vectors(spark_session, score_vector_table, did_bucket, search_bucket_step, did_bucket_size)
        end_load = time.time()
        total_load_time += end_load-start_load
        top_n_distances, top_n_indices = gpu_index.search(score_vectors, top_n_value)
        end_search = time.time()
        total_search_time += end_search-end_load
        # start_load = end_search
        # continue
        # Get the top N dids from the top N indexes.
        top_n_dids = did_list[top_n_indices]
        # Format and write the result back to Hive.
        # Format the data for a Spark dataframe in order to write to Hive.
        # [ ('0000001', [{'did':'0000001', 'score':1.73205081}, {'did':'0000003', 'score':1.73205081}, {'did':'0000004', 'score':0.88532267}, {'did':'0000002', 'score':0.66903623}], 0),
        #   ('0000002', [{'did':'0000002', 'score':1.73205081}, {'did':'0000004', 'score':1.50844401}, {'did':'0000001', 'score':0.66903623}, {'did':'0000003', 'score':0.66903623}], 0),
        #  ... ]
        data = [(str(did), [(str(n_did), float(distance)) for n_did, distance in zip(top_did, top_distances)], int(bucket))
            for did, top_did, top_distances, bucket in zip(dids, top_n_dids, top_n_distances, buckets)]
        # Output dataframe schema.
        schema = StructType([
            StructField("did", StringType(), True),
            StructField("top_n_similar_user", ArrayType(StructType([StructField('did', StringType(), False), StructField('score', FloatType(), False)]), True)),
            StructField("did_bucket", IntegerType(), True)
        ])
        # Create the output dataframe with the similar users for each user.
        df = spark_session.createDataFrame(spark_session.sparkContext.parallelize(data), schema)
        end_format = time.time()
        total_format_time += end_format-end_search
        # Write the output dataframe to Hive.
        # First batch overwrites the table; subsequent batches append.
        write_to_table_with_partition(df, similarity_table, partition=('did_bucket'), mode=mode)
        mode = 'append'
        end_write = time.time()
        total_write_time += end_write-end_format
        start_load = end_write
    search_time = time.time()
    # Summary statistics for the whole run.
    print('Index size:', gpu_index.ntotal)
    print(gpu_index.d)
    print(4 * gpu_index.d * gpu_index.ntotal, 'bytes (uncompressed)')
    print('Total time: ', str(datetime.timedelta(seconds=search_time-start_time)))
    print('   Index load time: ', str(datetime.timedelta(seconds=load_time-start_time)))
    print('   Overall search time: ', str(datetime.timedelta(seconds=search_time-load_time)))
    print('      Total load time: ', str(datetime.timedelta(seconds=total_load_time)))
    print('      Total search time: ', str(datetime.timedelta(seconds=total_search_time)))
    print('      Total format time: ', str(datetime.timedelta(seconds=total_format_time)))
    print('      Total write time: ', str(datetime.timedelta(seconds=total_write_time)))
if __name__ == "__main__":
    # Entry point: load the YAML config given on the command line, start a
    # Hive-enabled Spark session, and run the top-N similarity pipeline.
    start = time.time()
    parser = argparse.ArgumentParser(description=" ")
    parser.add_argument('config_file')
    args = parser.parse_args()
    with open(args.config_file, 'r') as yml_file:
        cfg = yaml.safe_load(yml_file)
        # Expand placeholder references inside the config in place.
        resolve_placeholder(cfg)
    spark_session = SparkSession.builder.enableHiveSupport().getOrCreate()
    spark_session.sparkContext.setLogLevel('WARN')
    run(spark_session, cfg)
    print('Runtime of the program is:', str(datetime.timedelta(seconds=time.time() - start)))
| {
"alphanum_fraction": 0.6933470743,
"author": null,
"avg_line_length": 45.9631578947,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e031f9ee41bc46346736973110ac287c4c705947",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2022-01-12T17:59:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-08T19:46:09.000Z",
"max_forks_repo_head_hexsha": "5635d897fe4aee65e6549cd8cc5e52a6b191cbaa",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "radibnia77/blue-marlin",
"max_forks_repo_path": "Model/lookalike-model/lookalike_model/application/pipeline/top_n_similarity.py",
"max_issues_count": 23,
"max_issues_repo_head_hexsha": "2ed1ef8533c75e4172d8d09c9380de18c5f13aee",
"max_issues_repo_issues_event_max_datetime": "2022-03-07T16:16:32.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-09T20:37:02.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Faezehvaseghi/incubator-bluemarlin",
"max_issues_repo_path": "Model/lookalike-model/lookalike_model/application/pipeline/top_n_similarity.py",
"max_line_length": 290,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "2ed1ef8533c75e4172d8d09c9380de18c5f13aee",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Faezehvaseghi/incubator-bluemarlin",
"max_stars_repo_path": "Model/lookalike-model/lookalike_model/application/pipeline/top_n_similarity.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-25T11:52:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-08T17:17:34.000Z",
"num_tokens": 2120,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8733
} |
module randomm
   use types
   implicit none
   private
   public gasdev
contains
   !! gasdev : Returns a normally distributed deviate with zero mean and unit variance from Numerical recipes
   !! Uses the Marsaglia polar method: each accepted pair of uniforms yields
   !! two independent normal deviates; the second is cached in `gset` and
   !! returned on the next call, so the rejection loop runs every other call.
   real(dp) function gasdev()
      real(dp) :: v1, v2, fac, rsq
      ! Cached second deviate and its validity flag persist across calls.
      real(dp), save :: gset
      logical, save :: available = .false.
      if (available) then
         ! Return the deviate left over from the previous call.
         gasdev = gset
         available = .false.
      else
         do
            ! Draw a point uniformly in the square [-1,1] x [-1,1] ...
            call random_number(v1)
            call random_number(v2)
            v1 = 2.0_dp*v1-1.0_dp
            v2 = 2.0_dp*v2-1.0_dp
            rsq = v1*v1 + v2*v2
            ! ... and accept only points strictly inside the unit circle
            ! (rsq = 0 is also rejected to keep log(rsq) finite).
            if ((rsq > 0.0_dp) .and. (rsq < 1.0_dp)) exit
         end do
         ! Box-Muller transform of the accepted point.
         fac = sqrt(-2.0_dp * log(rsq) / rsq)
         gasdev = v1 * fac
         ! Save the companion deviate for the next call.
         gset = v2 * fac
         available = .true.
      end if
   end function gasdev
end module randomm | {
"alphanum_fraction": 0.568986569,
"author": null,
"avg_line_length": 22.75,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "5a902181168e020561dc0873542fe674050d006a",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4f072f58ada42c7dc34aa6b348a06ece8dc358fc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "edwinb-ai/brownian-dynamics",
"max_forks_repo_path": "include/random.f90",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4f072f58ada42c7dc34aa6b348a06ece8dc358fc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "edwinb-ai/brownian-dynamics",
"max_issues_repo_path": "include/random.f90",
"max_line_length": 106,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "4f072f58ada42c7dc34aa6b348a06ece8dc358fc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "edwinb-ai/brownian-dynamics",
"max_stars_repo_path": "include/random.f90",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 255,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 819
} |
import numpy as np
import torch
from torch.autograd import Variable
from .noise import ParamNoise, OrnsteinUhlenbeckProcess
use_cuda = torch.cuda.is_available()
class ActionNoiseExploration:
    """Exploration strategy that adds Ornstein-Uhlenbeck noise to actions."""

    def __init__(self, actor, env, ou_theta, ou_sigma):
        self.actor = actor
        # One noise dimension per action dimension.
        self.random_process = OrnsteinUhlenbeckProcess(
            env.action_space.shape[0], theta=ou_theta, sigma=ou_sigma)

    def select_action(self, state, exploration=True):
        """Returns the actor's action for `state`, with OU noise added when
        `exploration` is True."""
        if use_cuda:
            state = state.cuda()
        # Forward pass in eval mode, then restore train mode.
        self.actor.eval()
        action = self.actor(state)
        self.actor.train()
        if exploration:
            sample = self.random_process.sample()
            noise = Variable(torch.from_numpy(sample).float())
            if use_cuda:
                noise = noise.cuda()
            action = action + noise
        return action

    def reset(self):
        """Resets the underlying noise process."""
        self.random_process.reset()
class ParamNoiseExploration:
    """Exploration strategy based on parameter-space noise: acts with a
    perturbed copy of the actor instead of perturbing the action itself."""

    def __init__(self, actor, batch_size, memory):
        self.actor = actor
        self.param_noise = ParamNoise(batch_size, memory)
        self.perturbed_model = self.param_noise.perturb_model(self.actor)

    def select_action(self, state, exploration=True):
        """Returns an action from the perturbed model when exploring,
        otherwise from the unperturbed actor."""
        if use_cuda:
            state = state.cuda()
        # Pick which network acts, run it in eval mode, then restore train mode.
        model = self.perturbed_model if exploration else self.actor
        model.eval()
        action = model(state)
        model.train()
        if exploration:
            # Adapt the perturbation scale from the actor/perturbed divergence.
            self.param_noise.update_sigma(self.actor, self.perturbed_model)
        return action

    def reset(self):
        """Re-perturbs the actor with a fresh noise sample."""
        self.param_noise.reset()
        self.perturbed_model = self.param_noise.perturb_model(self.actor)
| {
"alphanum_fraction": 0.6063596491,
"author": null,
"avg_line_length": 30.4,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2d43828eecace0aa4303d569dcd5a6d72b40e90d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7b42265a42c389964ddf3ff535dbfb9370863ac5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nicoring/RoBonjwa",
"max_forks_repo_path": "roborl/util/exploration.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "7b42265a42c389964ddf3ff535dbfb9370863ac5",
"max_issues_repo_issues_event_max_datetime": "2018-03-02T19:39:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-03-02T19:39:36.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nicoring/RoBonjwa",
"max_issues_repo_path": "roborl/util/exploration.py",
"max_line_length": 86,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7b42265a42c389964ddf3ff535dbfb9370863ac5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nicoring/RoBonjwa",
"max_stars_repo_path": "roborl/util/exploration.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 362,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1824
} |
[STATEMENT]
lemma (in heapstruct) decrease_key_op_invar:
"\<lbrakk>heap_invar h; valid h i; prio v \<le> prio_of h i\<rbrakk> \<Longrightarrow> decrease_key_op i v h \<le> SPEC heap_invar"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>heap_invar h; valid h i; prio v \<le> prio_of h i\<rbrakk> \<Longrightarrow> decrease_key_op i v h \<le> SPEC heap_invar
[PROOF STEP]
unfolding decrease_key_op_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>heap_invar h; valid h i; prio v \<le> prio_of h i\<rbrakk> \<Longrightarrow> ASSERT (valid h i \<and> prio v \<le> prio_of h i) \<bind> (\<lambda>_. update_op h i v \<bind> (\<lambda>h. swim_op h i)) \<le> SPEC heap_invar
[PROOF STEP]
apply refine_vcg
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. \<lbrakk>heap_invar h; valid h i; prio v \<le> prio_of h i; valid h i \<and> prio v \<le> prio_of h i; x = update h i v\<rbrakk> \<Longrightarrow> swim_invar x i
2. \<And>x xa. \<lbrakk>heap_invar h; valid h i; prio v \<le> prio_of h i; valid h i \<and> prio v \<le> prio_of h i; x = update h i v; \<alpha> xa = \<alpha> x \<and> heap_invar xa \<and> length xa = length x\<rbrakk> \<Longrightarrow> heap_invar xa
[PROOF STEP]
by (auto simp: swim_invar_decr) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Refine_Imperative_HOL_IICF_Impl_Heaps_IICF_Abs_Heapmap",
"hexsha": null,
"include": null,
"lang": null,
"length": 3,
"llama_tokens": 541,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
#include "iostream"
#include "QString"
#include "volumeinfo.h"
#include "volumeutility.h"
#include "QDebug"
#include <VolumeViz/nodes/SoVolumeRendering.h>
#include <VolumeViz/converters/SoVolumeConverter.h>
#include <LDM/converters/SoConverterParameters.h>
#include <Inventor/helpers/SbDataTypeMacros.h>
#include <LDM/readers/SoVRLdmFileReader.h>
#include <Inventor/devices/SoCpuBufferObject.h>
#include <VolumeViz/nodes/SoVolumeRendering.h>
#include <VolumeViz/nodes/SoVolumeData.h>
#include <VolumeViz/nodes/SoTransferFunction.h>
#include <VolumeViz/readers/SoVRGenericFileReader.h>
#include <LDM/converters/SoConverterParameters.h>
#include <LDM/readers/SoLDMReader.h>
#include <LDM/readers/SoVRLdmFileReader.h>
#include <LDM/SoLDMTopoOctree.h>
//#include <LDM/tiles/>
#include <LDM/fields/SoSFLDMResourceParameters.h>
#include "NeoInsightsLDMreader.h"
#include "NeoInsightsLDMWriter.h"
#include <VolumeViz/nodes/SoVolumeRender.h>
#include <Inventor/Xt/SoXt.h>
#include <Inventor/Xt/viewers/SoXtExaminerViewer.h>
#include <Inventor/nodes/SoSeparator.h>
#include <VolumeViz/nodes/SoVolumeData.h>
#include <VolumeViz/nodes/SoVolumeSkin.h>
#include <LDM/nodes/SoDataRange.h>
#include <LDM/nodes/SoTransferFunction.h>
#include <VolumeViz/nodes/SoVolumeRendering.h>
#include "QDomDocument"
#include "volumeconverter.h"
#include "opencvincludes.h"
#include "volumetopooctree.h"
#include "display3droutines.h"
#include "vtkImageData.h"
#include "vtkMarchingCubes.h"
#include "histogramfilter.h"
#include "QDebug"
#include "volumesegmenter.h"
#include "vtkSTLReader.h"
#include <vtkColorTransferFunction.h>
#include <vtkSmartVolumeMapper.h>
#include <vtkVolumeProperty.h>
#include <vtkPiecewiseFunction.h>
#include <vtkPointData.h>
#include <vtkDataArray.h>
#include <vtkRenderWindow.h>
#include <vtkRenderer.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkPLYWriter.h>
#include "rawvolumedataio.h"
#include "volumeutility.h"
#include <Inventor/nodes/SoFragmentShader.h>
#include <VolumeViz/nodes/SoVolumeRenderingQuality.h>
#include "customtilemanager.h"
#include "customtilevisitor.h"
#include "customnodefrontmanager.h"
#include "CustomTiledReader.h"
#include "optimizedzxogenerator.h"
#include "rawvolumedataio.h"
#include "iostream"
#include "opencvincludes.h"
#include "ippr.h"
#include "ipp.h"
#include "NeoInsightsLDMWriter.h"
#include "optimizedzxoreader.h"
#include "openOR/ZeissBackend.hpp"
#include <openOR/Image/ZeissRawImporter.hpp>// Zeiss_ImageIO
#include <openOR/Image/ZeissRawExporter.hpp>// Zeiss_ImagIO
#include <openOR/Image/ZeissRawExtendedExporter.hpp>// Zeiss_ImagIO
#include <openOR/Image/HistogramCollector.hpp>// Image_Utility
#include <openOR/Image/SubvolumeHistogramCollector.hpp>// Image_Utility
#include <openOR/Image/VolumeCollector.hpp>// Image_Utility
#include <openOR/Image/HistogramAnalyser.hpp>// Image_Utility
#include <openOR/Image/HistogramMerger.hpp>// Image_Utility
#include <openOR/Image/HistogramProbabilityAnalyser.hpp>// Image_Utility
#include <openOR/ScanSurfaceSegmentor.hpp>// ZeissBackend
#include <openOR/MTScanSurfaceSegmentor.hpp>// ZeissBackend
#include <openOR/ProbabilitySegmentation.hpp>// ZeissBackend
#include <openOR/Image/ROIContainer.hpp> // Image_regions
#include <boost/function.hpp>
#include <functional>
#include <algorithm>
#include <openOR/Plugin/create.hpp> //openOR_core
#include <openOR/Image/Image3DData.hpp> //Image_ImageData
#include <openOR/Log/Logger.hpp> //openOR_core
# define OPENOR_MODULE_NAME "Application.ZeissBackend"
# include <openOR/Log/ModuleFilter.hpp>
//#include <openOR/cleanUpWindowsMacros.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
#include <boost/foreach.hpp>
void viewVolumeSkin(SoVolumeData* pVolumeData, ushort minVal, ushort maxVal);
void viewVolume(std::string zxoFilePath, int minVal, int maxVal);
int main(int argc, char **argv)
{
    // Path of the ZXO volume to display (hard-coded for this demo tool).
    const std::string zxoFilePath = "C:/Data/ZxoData/CZXRM_Sandbox.ZXO";

    // NOTE(review): min/max of 0 are passed through to viewVolume —
    // presumably it derives the real data range itself; confirm.
    int minVal = 0;
    int maxVal = 0;

    SoVolumeRendering::init();
    viewVolume(zxoFilePath, minVal, maxVal);

    return 0;
}
void viewVolumeSkin(SoVolumeData* pVolumeData, ushort minVal, ushort maxVal)
{
    // Displays the given volume in an SoXt examiner viewer using the
    // predefined STANDARD color map over [minVal, maxVal].
    //
    // NOTE(review): despite the name, this renders a full SoVolumeRender.
    // The original code also allocated an SoVolumeSkin and an SoDataRange
    // but never added either to the scene graph (and never ref'd them, so
    // they leaked); those dead allocations have been removed. Because no
    // SoDataRange is in the scene, VolumeViz maps the voxel type's full
    // range (e.g. 0..65535 for unsigned short) into the colormap — insert
    // an SoDataRange before the transfer function if 16-bit data renders
    // washed out.

    // Transfer function: predefined opaque STANDARD color map, remapped to
    // the caller-supplied value window.
    SoTransferFunction* pTransFunc = new SoTransferFunction;
    pTransFunc->predefColorMap = SoTransferFunction::STANDARD;
    pTransFunc->minValue = minVal;
    pTransFunc->maxValue = maxVal;

    // Volume rendering node; sampling aligned to the data grid.
    SoVolumeRender* pVolRender = new SoVolumeRender;
    pVolRender->samplingAlignment = SoVolumeRender::DATA_ALIGNED;

    // Assemble the scene graph.
    // Note: the data node must appear before the nodes that consume it.
    SoSeparator* root = new SoSeparator;
    root->ref();
    root->addChild(pVolumeData);
    root->addChild(pTransFunc);
    root->addChild(pVolRender);

    // Set up the viewer and run the event loop (blocks until the window
    // is closed), then tear down the toolkit.
    Widget myWindow = SoXt::init("");
    SoXtExaminerViewer* myViewer = new SoXtExaminerViewer(myWindow);
    myViewer->setSceneGraph(root);
    myViewer->setTitle("Volume Skin");
    myViewer->show();
    SoXt::show(myWindow);
    SoXt::mainLoop();
    SoVolumeRendering::finish();
    SoXt::finish();
}
// Load a volume from a ZXO file via the custom OptimizedZXOReader and display it
// with an interactive Open Inventor examiner viewer using direct volume rendering.
// Blocks inside SoXt::mainLoop() until the viewer window is closed, then shuts
// down VolumeViz and SoXt.
//
// Parameters:
//   zxoFilePath - path of the .zxo volume file to load
//   minVal      - lower end of the gray-value range mapped onto the colormap
//   maxVal      - upper end of the gray-value range mapped onto the colormap
void viewVolume(std::string zxoFilePath, int minVal, int maxVal)
{
// Initialize the VolumeViz extension and register the custom reader node type.
SoVolumeRendering::init();
imt::volume::OptimizedZXOReader::initClass();
SoVolumeData *volumeData = new SoVolumeData;
imt::volume::OptimizedZXOReader *reader = new imt::volume::OptimizedZXOReader();
// NOTE(review): second argument `true` presumably hands ownership of the
// reader to the SoVolumeData node — confirm against the VolumeViz docs.
volumeData->setReader(*reader, true);
volumeData->fileName.setValue(zxoFilePath);//filePath // //zxoFilePath.toStdString() //"backup.zxo2"
std::vector< imt::volume::Material > materials;
imt::volume::VolumeUtility::computeMaterials( volumeData , materials );
//volumeData->ldmResourceParameters.getValue()->resolution = 0;
// If necessary, specify the actual range of the data values.
// By default VolumeViz maps the entire range of the voxel data type
// (e.g. 0..65535 for unsigned short) into the colormap. This works
// great for byte (8 bit) voxels, but not so well for 16 bit voxels
// and not at all for floating point voxels. So it's not actually
// necessary for this data set, but shown here for completeness.
// NOTE: Min/max values are stored in the header for LDM format
// files, but for other formats the getMinMax query can take a
// long time because VolumeViz has to examine every voxel.
SoDataRange *pRange = new SoDataRange();
int voxelSize = volumeData->getDataSize();
//std::cout << " voxel size : " << voxelSize << " " << pVolumeData->getDataType() << std::endl;
//if (voxelSize > 1)
{
//double minval, maxval;
//pVolumeData->getMinMax(minval, maxval);
pRange->min = minVal;
pRange->max = maxVal;
}
// Use a predefined colorMap with the SoTransferFunction
SoTransferFunction* pTransFunc = new SoTransferFunction;
pTransFunc->predefColorMap = SoTransferFunction::STANDARD;//AIRWAY //NONE; //STANDARD;//TEMPERATURE;//
pTransFunc->minValue = minVal;
pTransFunc->maxValue = maxVal;
// (Other available predefColorMap values: NONE, GREY/GRAY, TEMPERATURE,
// PHYSICS, STANDARD, GLOW, BLUE_RED, SEISMIC, BLUE_WHITE_RED, INTENSITY,
// LABEL_256, VOLREN_RED, VOLREN_GREEN, AIRWAY, AIRWAY_SURFACES.)
// Node in charge of drawing the volume
// NOTE(review): pVolRender (skin rendering) is created but never added to the
// scene graph below — only pVolRender2 is used; confirm whether the skin node
// is intentionally unused.
SoVolumeSkin *pVolRender = new SoVolumeSkin;
// Node in charge of drawing the volume
SoVolumeRender* pVolRender2 = new SoVolumeRender;
pVolRender2->samplingAlignment = SoVolumeRender::DATA_ALIGNED;
// Assemble the scene graph
// Note: SoVolumeSkin must appear after the SoVolumeData node.
SoSeparator *root = new SoSeparator;
root->ref();
root->addChild(volumeData);
//root->addChild(pRange);
root->addChild(pTransFunc);
root->addChild(pVolRender2);
Widget myWindow = SoXt::init("");
// Set up viewer:
SoXtExaminerViewer *myViewer = new SoXtExaminerViewer(myWindow);
myViewer->setSceneGraph(root);
myViewer->setTitle("Volume Skin");
myViewer->show();
SoXt::show(myWindow);
// Blocks here until the viewer window is closed.
SoXt::mainLoop();
SoVolumeRendering::finish();
SoXt::finish();
}
#if 0
// DEAD CODE (compiled out with #if 0): sketch of a multi-resolution histogram
// analysis pipeline. The overall flow is:
//   1. ensure the region/probability output containers exist,
//   2. run a HistogramAnalyser per histogram resolution (full, 1/2, 1/3, 1/4)
//      to compute material density regions and an object threshold,
//   3. merge the per-resolution regions with HistogramMerger,
//   4. correct peak shifts between resolutions,
//   5. pick the first material above the object threshold,
//   6. fit normal distributions on the peaks with HistogramProbabilityAnalyser.
// NOTE(review): the local m_* variables below are uninitialized locals that
// mirror what were presumably class members in the original context — this
// function would not work as-is.
void createHistogramRegions() {
std::tr1::shared_ptr<openOR::Image::Image1DData<openOR::Triple<size_t> > > m_pRegionsOrg, m_pRegionsHalf, m_pRegionsThird, m_pRegionsFourth, m_pRegions;
std::tr1::shared_ptr<openOR::Image::Image1DData<openOR::Quad<double> > > m_pProbabilities;
std::tr1::shared_ptr<openOR::Image::Image1DDataUI64> m_pHistogramFirst;
std::tr1::shared_ptr<openOR::Image::Image1DDataUI64> m_pHistogramSecond;
std::tr1::shared_ptr<openOR::Image::Image1DDataUI64> m_pHistogramThird;
std::tr1::shared_ptr<openOR::Image::Image1DDataUI64> m_pHistogramFourth;
size_t m_maxMemorySize;
int m_downSampler;
size_t m_ROIBorderSize;
size_t m_ROIMinEdgeLength;
size_t m_ROIMinVolume;
bool m_outputMaterialIndex;
unsigned int m_nFirstMaterialIndex;
unsigned int m_nSegmentationRadius;
int m_expandMaskTimes;
bool m_bGPU;
float m_fProbabilityThreshold;
size_t m_nFilterRadius;
size_t m_nOpenMPCurrentThreads;
size_t m_nOpenMPMaxThreads;
float m_fCPUPerformanceValue;
bool m_bAutoScaling;
bool m_bMTSurfSegment;
unsigned int m_numberSeparationThreads;
bool m_bSearchForHighDensityCarrierMaterial;
int m_useMultiScaleHistogram;
size_t m_objectThreshold;
size_t m_background;
size_t m_usedHistogramResolutions;
// - initialize Region- and Probablility-members if they are uninitialized
// - depending on the number of histograms to be used create instance of a Histogramanalyser, caluculate the regions and store them to the matching member
// - merge all histograms
// - correct the shifts among the histograms (because the peaks in the histograms appear in different places due to the different resolutions)
// - create instance of HistogramProbabilityAnalyser and and fit normal distributions on the histogram peaks
//assert(m_pHistogramFirst && "Need filled histogram to calculate material regions.");
if (!m_pRegions)
{
m_pRegions = openOR::createInstanceOf< openOR::Image::Image1DData< openOR::Triple<size_t> > >();
}
if (!m_pProbabilities)
{
m_pProbabilities = openOR::createInstanceOf<openOR::Image::Image1DData<openOR::Quad<double> > >();
}
else
{
// Always re-create the container so stale probabilities never leak through.
m_pProbabilities = openOR::createInstanceOf<openOR::Image::Image1DData<openOR::Quad<double> > >();
LOG(openOR::Log::Level::Info, OPENOR_MODULE, openOR::Log::msg("Probabilities were already set. Resetting them."));
}
// Records which resolution step (0 = full) ultimately supplied m_objectThreshold,
// so the matching shift correction can be applied later.
uint nObjectThresholdFromHistogramResolustionStep = 0;
bool bSearchforhighdensitycarriermaterial = m_bSearchForHighDensityCarrierMaterial;
// create second and third histogram
if (m_useMultiScaleHistogram >= 2)
{
assert(m_pHistogramSecond && m_pHistogramThird && "Need reduced histograms to use multi-scale histograms.");
if (!m_pRegionsOrg)
{
m_pRegionsOrg = openOR::createInstanceOf<openOR::Image::Image1DData<openOR::Triple<size_t> > >();
}
if (!m_pRegionsHalf)
{
m_pRegionsHalf = openOR::createInstanceOf<openOR::Image::Image1DData<openOR::Triple<size_t> > >();
}
if (!m_pRegionsThird)
{
m_pRegionsThird = openOR::createInstanceOf<openOR::Image::Image1DData<openOR::Triple<size_t> > >();
}
if (!m_pRegionsFourth)
{
m_pRegionsFourth = openOR::createInstanceOf<openOR::Image::Image1DData<openOR::Triple<size_t> > >();
}
if (!m_pRegions)
{
m_pRegions = openOR::createInstanceOf<openOR::Image::Image1DData<openOR::Triple<size_t> > >();
}
//initialize first histogram analyser and compute regions
std::tr1::shared_ptr< openOR::Image::HistogramAnalyser> pCalc = openOR::createInstanceOf<openOR::Image::HistogramAnalyser>();
pCalc->setData(m_pHistogramFirst, "in");
pCalc->setData(m_pRegionsOrg, "out");
pCalc->setSearchforhighdensitycarriermaterial(bSearchforhighdensitycarriermaterial);
(*pCalc)();
m_objectThreshold = pCalc->objectThreshold();
m_background = pCalc->backgroundPeak();
//initialize second histogram analyser and compute regions on half the resolution
pCalc = openOR::createInstanceOf<openOR::Image::HistogramAnalyser>();
pCalc->setData(m_pHistogramSecond, "in");
pCalc->setData(m_pRegionsHalf, "out");
pCalc->setSearchforhighdensitycarriermaterial(bSearchforhighdensitycarriermaterial);
(*pCalc)();
// Fall back to the coarser resolution's threshold when the full-resolution
// threshold landed suspiciously close to the top of the histogram (within 65 bins).
if (m_pRegionsHalf->size() > 1 && m_objectThreshold >= m_pHistogramFirst->size() - (64 + 1))
{
nObjectThresholdFromHistogramResolustionStep = 1;
m_objectThreshold = pCalc->objectThreshold();
}
//initialize third histogram analyser and compute regions on a third of the resolution
if (m_usedHistogramResolutions >= 3)
{
pCalc = openOR::createInstanceOf<openOR::Image::HistogramAnalyser>();
pCalc->setData(m_pHistogramThird, "in");
pCalc->setData(m_pRegionsThird, "out");
pCalc->setSearchforhighdensitycarriermaterial(bSearchforhighdensitycarriermaterial);
(*pCalc)();
if (m_pRegionsThird->size() > 1 && m_objectThreshold >= m_pHistogramFirst->size() - (64 + 1)){
nObjectThresholdFromHistogramResolustionStep = 2;
m_objectThreshold = pCalc->objectThreshold();
}
}
//initialize fourth histogram analyser and compute regions on a quarter of the resolution
if (m_usedHistogramResolutions >= 4)
{
pCalc = openOR::createInstanceOf<openOR::Image::HistogramAnalyser>();
pCalc->setData(m_pHistogramFourth, "in");
pCalc->setData(m_pRegionsFourth, "out");
pCalc->setSearchforhighdensitycarriermaterial(bSearchforhighdensitycarriermaterial);
(*pCalc)();
if (m_pRegionsFourth->size() > 1 && m_objectThreshold >= m_pHistogramFirst->size() - (64 + 1)){
nObjectThresholdFromHistogramResolustionStep = 3;
m_objectThreshold = pCalc->objectThreshold();
}
}
//merge all above histograms
std::tr1::shared_ptr<openOR::Image::HistogramMerger> pMerger = openOR::createInstanceOf<openOR::Image::HistogramMerger>();
pMerger->setData(m_pRegionsOrg, "in");
pMerger->setData(m_pRegionsHalf, "in");
if (m_usedHistogramResolutions >= 3) pMerger->setData(m_pRegionsThird, "in");
if (m_usedHistogramResolutions >= 4) pMerger->setData(m_pRegionsFourth, "in");
pMerger->setData(m_pRegions, "out");
pMerger->setData(m_pHistogramFirst);
//openOR::setCurrentStep(pMerger);
//m_pCurrentStep = interface_cast<Progressable>(step); // can be null, that is fine!
//m_pCanCancelCurrentStep = interface_cast<Cancelable>(step); // dito
//if (m_inOneGoFlag) {
// assert(m_currentStepNum < m_numSteps && "one step to far!");
// ++m_currentStepNum;
//}
try {
(*pMerger)();
}
catch (...) {
// On merge failure fall back to the full-resolution regions.
m_pRegions = m_pRegionsOrg;
}
//correct the shift in peaks: (the peaks in the histograms appear in different places due to the different resolutions)
if (1)//(m_correctRegionShift)
{
std::vector<std::tr1::shared_ptr<openOR::Image::Image1DData<openOR::Triple<size_t> > > > vecMultiResolutionDensityIntervals;
vecMultiResolutionDensityIntervals.push_back(m_pRegionsOrg);
vecMultiResolutionDensityIntervals.push_back(m_pRegionsHalf);
if (m_usedHistogramResolutions >= 3) vecMultiResolutionDensityIntervals.push_back(m_pRegionsThird);
if (m_usedHistogramResolutions >= 4) vecMultiResolutionDensityIntervals.push_back(m_pRegionsFourth);
//get the correction for shifting the peaks to match the peaks in the other histograms
std::vector<int> vecShifts = pMerger->getShiftCorrections();
//shift the peaks
correctRegionShifts(vecMultiResolutionDensityIntervals, vecShifts);
if (nObjectThresholdFromHistogramResolustionStep > 0 && nObjectThresholdFromHistogramResolustionStep < vecShifts.size())
{
m_objectThreshold = m_objectThreshold - vecShifts.at(nObjectThresholdFromHistogramResolustionStep);
}
size_t vecSize = vecMultiResolutionDensityIntervals.size();
// NOTE(review): suspected off-by-one below — element [1] is written back only
// when vecSize > 2 (should probably be > 1), [2] only when > 3, and [3] only
// when > 4, which can never hold since at most 4 entries are pushed. Confirm
// against the original class implementation before reviving this code.
m_pRegionsOrg = vecMultiResolutionDensityIntervals[0];
if (vecSize > 2) m_pRegionsHalf = vecMultiResolutionDensityIntervals[1];
if (vecSize > 3) m_pRegionsThird = vecMultiResolutionDensityIntervals[2];
if (vecSize > 4) m_pRegionsFourth = vecMultiResolutionDensityIntervals[3];
}
}
else if (m_useMultiScaleHistogram == 1)
{
// if there is only one Histogram there is no merging to do and the result is the one from the original scale
std::tr1::shared_ptr<openOR::Image::HistogramAnalyser> pCalc = openOR::createInstanceOf<openOR::Image::HistogramAnalyser>();
pCalc->setData(m_pHistogramFirst, "in");
pCalc->setData(m_pRegions, "out");
setCurrentStep(pCalc);
(*pCalc)();
m_objectThreshold = pCalc->objectThreshold();
m_background = pCalc->backgroundPeak();
}
else {
// object threshold should be set from outside
m_objectThreshold = 0x0fff;
m_background = 0;
//FH: added a note for the user to know that we use empirical values for the object threshold and the background peak
}
// set first material
// The first region whose upper bound exceeds the object threshold marks the
// first non-background material.
for (size_t i = 0; i < m_pRegions->size(); i++) {
const openOR::Triple<size_t>& region = m_pRegions->data()[i];
if (region.second > m_objectThreshold) {
setFirstMaterialIndex(static_cast<unsigned int>(i));
break;
}
}
//calc probabilities and fit nomal distributions on the histogram
std::tr1::shared_ptr<openOR::Image::HistogramProbabilityAnalyser> pProbAnalyser = openOR::createInstanceOf<openOR::Image::HistogramProbabilityAnalyser>();
pProbAnalyser->setData(m_pHistogramSecond);
pProbAnalyser->setData(m_pRegions);
pProbAnalyser->setData(m_pProbabilities);
pProbAnalyser->setHighDensityMaterialSearch(true);
(*pProbAnalyser)();
}
#endif
| {
"alphanum_fraction": 0.7031139106,
"author": null,
"avg_line_length": 37.3864013267,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "1afe822219ee089fa7233a1c16292fa18b0f55e5",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2bd848db88d7b271209ad30017c8f62307319be3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "avinfinity/UnmanagedCodeSnippets",
"max_forks_repo_path": "airfilteredvolumerenderingdemo.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2bd848db88d7b271209ad30017c8f62307319be3",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "avinfinity/UnmanagedCodeSnippets",
"max_issues_repo_path": "airfilteredvolumerenderingdemo.cpp",
"max_line_length": 155,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2bd848db88d7b271209ad30017c8f62307319be3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "avinfinity/UnmanagedCodeSnippets",
"max_stars_repo_path": "airfilteredvolumerenderingdemo.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5984,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 22544
} |
import numpy as np
def get_distances(data, factual, counterfactual):
    """
    Compute the four counterfactual distance measures d1-d4.

    :param data: Dataframe with original data
    :param factual: List of features
    :param counterfactual: List of features
    :return: Array of distances 1 to 4
    """
    distances = (
        d1_distance(factual, counterfactual),
        d2_distance(factual, counterfactual, data),
        d3_distance(factual, counterfactual, data),
        d4_distance(factual, counterfactual),
    )
    return np.array(distances)
def d1_distance(instance, cf):
    """
    Compute the d1 distance: the number of features that changed between
    the original instance and the counterfactual (label column excluded).

    :param instance: List of original feature
    :param cf: List of counterfactual feature
    :return: Scalar number
    """
    delta = get_delta(instance, cf)
    # Drop the trailing label column, then count the non-zero differences.
    return sum(1 for diff in delta[:-1] if diff != 0)
def d2_distance(instance, cf, df):
    """
    Compute the d2 distance: sum of absolute feature-wise differences,
    each normalized by that feature's range in the dataset (label column
    excluded).

    :param instance: List of original feature
    :param cf: List of counterfactual feature
    :param df: Dataframe object of dataset
    :return: Scalar number
    """
    # get difference between original and counterfactual
    delta = get_delta(instance, cf)
    delta = delta[:-1]  # lose label column
    # per-feature ranges; renamed from `range` to stop shadowing the builtin
    ranges = get_range(df)
    return sum(np.abs(diff / rng) for diff, rng in zip(delta, ranges))
def d3_distance(instance, cf, df):
    """
    Compute the d3 distance: sum of squared feature-wise differences,
    each normalized by that feature's range in the dataset (label column
    excluded).

    :param instance: List of original feature
    :param cf: List of counterfactual feature
    :param df: Dataframe object of dataset
    :return: Scalar number
    """
    # get difference between original and counterfactual
    delta = get_delta(instance, cf)
    delta = delta[:-1]  # lose label column
    # per-feature ranges; renamed from `range` to stop shadowing the builtin
    ranges = get_range(df)
    return sum((diff / rng) ** 2 for diff, rng in zip(delta, ranges))
def d4_distance(instance, cf):
    """
    Compute the d4 distance: the largest absolute feature-wise difference
    between instance and counterfactual (label column excluded).

    :param instance: List of original feature
    :param cf: List of counterfactual feature
    :return: Scalar number
    """
    # Differences without the trailing label column.
    delta = get_delta(instance, cf)[:-1]
    absolute_diffs = [np.abs(diff) for diff in delta]
    return np.max(absolute_diffs)
def get_delta(instance, cf):
    """
    Compute difference between original instance and counterfactual.

    String (categorical) features contribute 0 when equal and 1 when they
    differ; numeric features contribute ``cf - instance``.

    :param instance: List of features of original instance
    :param cf: List of features of counterfactual
    :return: List of differences between cf and original instance
    """
    delta = []
    # zip instead of index arithmetic; isinstance instead of type comparison
    for original, counterfactual in zip(instance, cf):
        if isinstance(original, str):
            delta.append(0 if original == counterfactual else 1)
        else:
            delta.append(counterfactual - original)
    return delta
def get_max_list(data):
    """
    Get the max element of every feature column (the last column, assumed
    to be the label, is skipped). The max for string columns is 1.

    :param data: numpy array
    :return: list of max elements
    """
    # renamed local from `max` to avoid shadowing the builtin
    maxima = []
    for i in range(data.shape[-1] - 1):
        column = data[:, i]
        if isinstance(column[0], str):
            maxima.append(1)
        else:
            maxima.append(np.max(column))
    return maxima
def get_min_list(data):
    """
    Get the min element of every feature column (the last column, assumed
    to be the label, is skipped). The min for string columns is 0.

    :param data: numpy array
    :return: list of min elements
    """
    # renamed local from `min` to avoid shadowing the builtin
    minima = []
    for i in range(data.shape[-1] - 1):
        column = data[:, i]
        if isinstance(column[0], str):
            minima.append(0)
        else:
            minima.append(np.min(column))
    return minima
def get_range(df):
    """
    Get range (max - min) of every feature column.

    :param df: dataframe object of dataset
    :return: list of ranges for every feature
    """
    data = df.values
    # renamed locals from `max`/`min`/`range` to avoid shadowing builtins
    maxima = get_max_list(data)
    minima = get_min_list(data)
    return [hi - lo for hi, lo in zip(maxima, minima)]
| {
"alphanum_fraction": 0.6229547642,
"author": null,
"avg_line_length": 24.3040935673,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d498d19f3a5d9422628a0a4a1dec109ba4cef966",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "beb0a8b5f04b30acd3b617d4443941f815601ba0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Philoso-Fish/CARLA",
"max_forks_repo_path": "carla/evaluation/distances.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "beb0a8b5f04b30acd3b617d4443941f815601ba0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Philoso-Fish/CARLA",
"max_issues_repo_path": "carla/evaluation/distances.py",
"max_line_length": 67,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "beb0a8b5f04b30acd3b617d4443941f815601ba0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Philoso-Fish/CARLA",
"max_stars_repo_path": "carla/evaluation/distances.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1082,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4156
} |
# -*- coding: utf-8 -*-
"""Parse NMEA logs from Alliance."""
from datetime import datetime, timedelta
import metpy.calc as mcalc
import metpy.units as metunits
import numpy as np
import pandas as pd
from pathlib import Path
import pynmea2
import xarray as xr
# Optional progress-bar support: bind `pbar` to tqdm (notebook- or
# terminal-flavoured) when tqdm is installed, or to a no-op passthrough
# when it is not.
# NOTE: when tqdm is missing only `pbar` is defined — code further down
# must call pbar, not tqdm, to benefit from this fallback.
try:
    # If tqdm is installed
    try:
        # Check if it's Jupyter Notebook
        # get_ipython() only exists inside IPython; NameError means plain Python.
        ipy_str = str(type(get_ipython()))
        if 'zmqshell' in ipy_str.lower():
            from tqdm import tqdm_notebook as tqdm
        else:
            from tqdm import tqdm
    except NameError:
        from tqdm import tqdm
    from functools import partial
    # leave=False removes finished bars so nested loops stay tidy
    pbar = partial(tqdm, leave=False)
except ImportError:
    def pbar(obj, **tqdm_kw):
        """Empty progress bar."""
        return obj
# TODO: docstrings!!!
# Typical message list
# Each entry names an NMEA talker sentence type and the fields to extract
# from it. A field is a 1-tuple (attribute_name,) or a 2-tuple
# (attribute_name, converter) where the converter is applied to the raw value.
MSG_LIST = [
    dict(talker='GGA', fields=(('datetime_str',), ('longitude',), ('latitude',))),
    dict(talker='HDT', fields=(('datetime_str',), ('heading', float))),
    dict(talker='MWV', fields=(('datetime_str',), ('status', str), ('reference', str),
                               ('wind_speed', float), ('wind_angle', float))),
    dict(talker='VHW', fields=(('datetime_str',), ('water_speed_knots', float), )),
]
class AllianceComposite:
    """
    Class for processing Alliance NMEA logs

    Useful for combining several messages together, aligning data along time axis,
    averaging over time, and saving to netCDF files.

    Typical workflow:
    >>> ac = AllianceComposite(Path('/path/to/file.log'), datetime.datetime(2018, 3, 1))
    >>> ac.process(msg_list)
    >>> averaged_dataset = ac.average_over_time(freq='1H')
    >>> ac.to_netcdf(averaged_dataset, path='myfile.nc')
    """

    # Epoch used both to decode the integer `datetime_str` fields and to
    # encode the time axis when writing netCDF.
    TSTART = datetime(1970, 1, 1)

    def __init__(self, fname, date):
        """
        Initialise the AllianceComposite object

        Arguments
        ---------
        fname: pathlib.Path
            Path to the log file to process
        date: datetime.datetime
            Log file date
        """
        assert isinstance(fname, Path), 'fname should be a Path object!'
        self.fname = fname
        self.date = date
        # Assume 1 second frequency of the log files
        # TODO: make it flexible
        self.time_range = (pd.date_range(start=date,
                                         freq='S',
                                         periods=86400)
                           .to_series()
                           .to_frame(name='time'))
        self.data_d = dict()

    def read(self, msg_req_list):
        """
        Read the log file and store results as `.ds` attribute (xarray.Dataset).

        Arguments
        ---------
        msg_req_list: list
            List of dictionaries with fields to extract from NMEA messages
        """
        # Prepare one list per requested field, grouped by talker.
        for msg_req in msg_req_list:
            self.data_d[msg_req['talker']] = dict()
            for fld in msg_req['fields']:
                self.data_d[msg_req['talker']][fld[0]] = []
        with self.fname.open('r') as f:
            # BUGFIX: use the module-level `pbar` wrapper instead of `tqdm`
            # directly; when tqdm is not installed only `pbar` is defined,
            # so the old code raised NameError here.
            for line in pbar(f.readlines()):
                try:
                    msg = pynmea2.NMEASentence.parse(line)
                    for msg_req in msg_req_list:
                        if isinstance(msg, getattr(pynmea2.talker,
                                                   msg_req['talker'])):
                            for fld in msg_req['fields']:
                                assert isinstance(fld, tuple), 'Each field must be tuple!'
                                value = getattr(msg, fld[0])
                                if len(fld) == 2:
                                    # if the tuple contains two elements, assume the second one
                                    # is a function to convert the field value
                                    try:
                                        value = fld[1](value)
                                    except (ValueError, TypeError):
                                        value = np.nan
                                self.data_d[msg_req['talker']][fld[0]].append(value)
                except pynmea2.ParseError:
                    # Skip malformed NMEA sentences silently.
                    pass
        # Convert dictionaries of lists to dataframes and merge them together
        # using the time_range dataframe of 86400 seconds
        df = self.time_range
        for val in self.data_d.values():
            msg_df = pd.DataFrame(data=val)
            msg_df.rename(dict(datetime_str='datetime'), axis=1, inplace=True)
            # datetime_str holds seconds since TSTART as an integer string.
            msg_df['datetime'] = (msg_df['datetime']
                                  .astype(int)
                                  .apply(lambda x: self.TSTART + timedelta(seconds=x)))
            msg_df = msg_df.drop_duplicates('datetime').set_index('datetime')
            df = pd.merge(df, msg_df,
                          how='outer', left_index=True, right_index=True)
        self.ds = df.to_xarray()

    def clean_up(self, drop_time=True,
                 mask_invalid_wind=True, mask_relative_wind=True,
                 convert_to_uv=True, convert_to_mps=True):
        """
        Clean up the dataset and add essential attributes.

        Arguments
        ---------
        drop_time: bool
            Remove additional time variable (and leave only the index)
        mask_invalid_wind: bool
            Mask out wind speed and wind angle values if $INMWV Status is not "A"
        mask_relative_wind: bool
            Mask out wind speed and wind angle values if $INMWV Reference is not "T"
        convert_to_uv: bool
            Convert wind speed and wind angle to u- and v-components
        convert_to_mps: bool
            Convert units of wind speed and water speed from knots to m/s
        """
        self.ds.longitude.attrs['units'] = 'degrees_east'
        self.ds.latitude.attrs['units'] = 'degrees_north'
        if drop_time:
            self.ds = self.ds.drop(labels=['time'])
            self.ds.rename(dict(index='time'), inplace=True)
        if mask_invalid_wind:
            self.ds.wind_angle.values[self.ds.status != 'A'] = np.nan
            self.ds.wind_speed.values[self.ds.status != 'A'] = np.nan
            self.ds = self.ds.drop(labels=['status'])
        if mask_relative_wind:
            self.ds.wind_angle.values[self.ds.reference != 'T'] = np.nan
            self.ds.wind_speed.values[self.ds.reference != 'T'] = np.nan
            self.ds = self.ds.drop(labels=['reference'])
        if convert_to_mps:
            kt2mps = metunits.units('knots').to('m/s')
            self.ds['wind_speed'] *= kt2mps
            self.ds['water_speed_knots'] *= kt2mps
            self.ds.rename(dict(water_speed_knots='water_speed'), inplace=True)
        else:
            kt2mps = metunits.units('knots')
        if convert_to_uv:
            # NOTE(review): when convert_to_mps is True, wind_speed is already
            # in m/s but kt2mps is a knots->m/s quantity; confirm the unit
            # attached here matches what get_wind_components expects.
            u, v = mcalc.get_wind_components(self.ds.wind_speed.values * kt2mps,
                                             self.ds.wind_angle.values * metunits.units('degrees'))
            self.ds = self.ds.drop(labels=['wind_speed', 'wind_angle'])
            self.ds = self.ds.assign(u=xr.Variable(dims='time', data=u,
                                                   attrs=dict(units='m s**-1',
                                                              long_name='U component of wind',
                                                              short_name='eastward_wind')),
                                     v=xr.Variable(dims='time', data=v,
                                                   attrs=dict(units='m s**-1',
                                                              long_name='V component of wind',
                                                              short_name='northward_wind')))

    def process(self, msg_req_list, **kwargs):
        """Shortcut for read() and clean_up() methods."""
        self.read(msg_req_list)
        self.clean_up(**kwargs)

    def time_ave(self, freq):
        """
        Average the dataset over constant periods of time

        Arguments
        ---------
        freq: string or pandas.DateOffset
            Size of time chunks. E.g. 10T is 10 minutes

        Returns
        -------
        ave_ds: xarray.Dataset
            Dataset of averaged data
        """
        # nanmean so gaps in the log do not poison whole averaging windows
        return self.ds.resample(time=freq).reduce(np.nanmean)

    @classmethod
    def to_netcdf(cls, ds, path, encoding=None, **kwargs):
        """Save xarray dataset to netCDF file and ensure that calendar uses the same start date."""
        if encoding is None:
            encoding = dict(time=dict(units=f'seconds since {cls.TSTART}',
                                      calendar='gregorian'))
        ds.to_netcdf(path=path, encoding=encoding, **kwargs)
def average_ds_over_time(ds, date, freq, mark='end', time_res='S'):
    """
    Average the dataset over constant periods of time

    Arguments
    ---------
    ds: xarray.Dataset
        Dataset to average
    date: datetime.datetime
        Start date
    freq: string or pandas.DateOffset
        Size of time chunks. E.g. 10T is 10 minutes
    mark: string, optional
        Time index mark. Can be one "start" or "end", e.g. the start
        or the end of time chunks.
    time_res: string, optional
        Can be seconds (S), minutes (M)

    Returns
    -------
    ave_ds: xarray.Dataset
        Dataset of averaged data
    """
    # create time index with the given frequency
    new_time = pd.date_range(start=date,
                             end=date+timedelta(hours=23, minutes=59, seconds=59),
                             freq=freq)
    # BUGFIX: compute the chunk length unconditionally. It was previously
    # assigned only inside the `mark == 'end'` branch but is needed below
    # for the samples-per-chunk count, so mark='start' raised NameError.
    tstep = new_time[1] - new_time[0]
    if mark == 'end':
        # label each chunk with its end time rather than its start time
        new_time += tstep
    # TODO: add "middle" option
    if time_res == 'S':
        # TODO: rewrite this
        ts = tstep.total_seconds()
    elif time_res == 'M':
        ts = tstep.total_seconds() / 60
    else:
        # fail loudly instead of hitting NameError on `ts` below
        raise ValueError(f'time_res must be "S" or "M", got {time_res!r}')
    # save attributes before averaging
    _attrs = {k: ds[k].attrs for k in ds.data_vars}
    # average over time chunks (integer division assigns each raw sample
    # to its chunk index)
    ave_ds = (ds.groupby(xr.IndexVariable(dims='time',
                                          data=np.arange(len(ds.time)) // ts))
              .mean())
    # reset time index
    ave_ds['time'] = new_time
    # after groupby operation, the attributes are lost, so the saved are used
    for k in ds.data_vars:
        ave_ds[k].attrs.update(_attrs[k])
    return ave_ds
"alphanum_fraction": 0.5360194175,
"author": null,
"avg_line_length": 38.7218045113,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4efa6dc7b149899e8e872cfb411a1ae3ff2e378b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "346d973453ae0804519f0394f31a864791f1af08",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "IGPResearch/shipmeteo",
"max_forks_repo_path": "parse_logs.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "346d973453ae0804519f0394f31a864791f1af08",
"max_issues_repo_issues_event_max_datetime": "2018-08-08T11:39:24.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-07-25T10:52:34.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "IGPResearch/shipmeteo",
"max_issues_repo_path": "parse_logs.py",
"max_line_length": 99,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "346d973453ae0804519f0394f31a864791f1af08",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "IGPResearch/shipmeteo",
"max_stars_repo_path": "parse_logs.py",
"max_stars_repo_stars_event_max_datetime": "2019-06-18T03:40:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-06-18T03:40:55.000Z",
"num_tokens": 2204,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10300
} |
# Carlos Morato, PhD.
# cwmorato@wpi.edu
# Deep Learning for Advanced Robot Perception
#
# Drop-Based Learning Rate Decay
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas
import numpy
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from sklearn.preprocessing import LabelEncoder
from keras.callbacks import LearningRateScheduler
# learning rate schedule
def step_decay(epoch):
    """Step-wise learning-rate schedule: start at 0.1 and halve every 20 epochs."""
    base_rate = 0.1       # initial learning rate; larger can help but may not converge
    decay_factor = 0.5    # multiplicative drop per step; smaller risks overfitting
    epochs_per_drop = 20.0  # epochs between consecutive drops
    exponent = math.floor((1 + epoch) / epochs_per_drop)
    return base_rate * math.pow(decay_factor, exponent)
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
# NOTE(review): expects "ionosphere.csv" in the working directory with 34
# feature columns followed by one class-label column — confirm file layout.
dataframe = pandas.read_csv("ionosphere.csv", header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:,0:34].astype(float)
Y = dataset[:,34]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
Y = encoder.transform(Y)
# create model
# NOTE(review): `init=` and `nb_epoch=` below are legacy Keras 1.x argument
# names (renamed to `kernel_initializer=` / `epochs=` in Keras 2) — confirm
# the installed Keras version before running.
model = Sequential()
model.add(Dense(34, input_dim=34, init='normal', activation='relu'))
model.add(Dense(1, init='normal', activation='sigmoid'))
# Compile model
# lr=0.0 because the LearningRateScheduler callback supplies the actual rate
# for every epoch via step_decay
sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
callbacks_list = [lrate]
# Fit the model
model.fit(X, Y, validation_split=0.33, nb_epoch=50, batch_size=28, callbacks=callbacks_list, verbose=2)
| {
"alphanum_fraction": 0.7652383826,
"author": null,
"avg_line_length": 32.4901960784,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4e69c688ee27b9b0d90ae80dea97af2e54eeaff2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "db16141b1cd18a03f182d418a2cf092f57fe4a6b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "JackHaoyingZhou/RBE595_DL_Discussion",
"max_forks_repo_path": "week5/Improving performance/decay_drop_based.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "db16141b1cd18a03f182d418a2cf092f57fe4a6b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JackHaoyingZhou/RBE595_DL_Discussion",
"max_issues_repo_path": "week5/Improving performance/decay_drop_based.py",
"max_line_length": 103,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "db16141b1cd18a03f182d418a2cf092f57fe4a6b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "JackHaoyingZhou/RBE595_DL_Discussion",
"max_stars_repo_path": "week5/Improving performance/decay_drop_based.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 424,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1657
} |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import os
import cv2
import glob
import time
import pickle
import shutil
import numpy as np
from .box import Box
from .connected_componentes import *
from .pre_processing import *
from .commonfunctions import *
import traceback
import logging
import skimage.io as io
from PIL import Image
from wand.image import Image
#from wand.display import display
from pathlib import Path
from .segmenter import Segmenter
from imutils import resize as im_resize
from scipy.ndimage import binary_fill_holes
from skimage.morphology import skeletonize, thin
from skimage.filters import threshold_otsu, gaussian, median, threshold_yen
from .staff import calculate_thickness_spacing, remove_staff_lines, coordinator
logging.basicConfig(filename='logs/slicer.log', level=logging.DEBUG)
def _remove_images(directory):
    """Delete every .png/.jpg/.jpeg file directly inside *directory*."""
    for item in os.listdir(directory):
        if item.endswith((".png", ".jpg", ".jpeg")):
            os.remove(os.path.join(directory, item))


def Slice(cv_img):
    """Deskew, binarize and segment a score image into per-staff slices.

    Args:
        cv_img: the score image as a numpy/OpenCV pixel array.

    Returns:
        list of pathlib.Path objects for the cropped slice images written to
        segmenter/output, or None when JPEG decoding fails.
    """
    start_time = time.time()
    segmented_staves = []
    logging.info("SLICER: beginning binarization " + str(time.time() - start_time))
    imgmat = None
    try:
        with Image.from_array(cv_img) as im:
            img_buffer = np.asarray(bytearray(im.make_blob("JPEG")), dtype=np.uint8)
            ret, mat = binarize_image(img_buffer)
            with Image(blob=mat) as timg:
                # Deskew before decoding back to grayscale; 0.4 * quantum_range
                # is the background-detection threshold wand expects.
                timg.deskew(0.4 * im.quantum_range)
                img_buffer = np.asarray(bytearray(timg.make_blob("JPEG")), dtype=np.uint8)
                imgmat = cv2.imdecode(img_buffer, cv2.IMREAD_GRAYSCALE)
    except cv2.error:
        logging.error(traceback.format_exc())
        logging.error("CV: read error")
        return
    logging.info("SLICER: beginning segmentation " + str(time.time() - start_time))
    imgmat = get_thresholded(imgmat, threshold_otsu(imgmat))
    segmenter = Segmenter(imgmat)
    imgs_with_staff = segmenter.regions_with_staff
    base = str(Path().absolute())
    # os.path.join keeps the paths portable (the original hard-coded "\\"
    # separators, which only work on Windows).
    output_dir = os.path.join(base, 'segmenter', 'output')
    melody_dir = os.path.join(base, 'data', 'melody')
    # Clear stale slice images left over from previous runs.
    _remove_images(output_dir)
    _remove_images(melody_dir)
    logging.info("SLICER: beginning cropping " + str(time.time() - start_time))
    for i, img in enumerate(imgs_with_staff):
        output_path = os.path.join(output_dir, 'slice' + str(i) + '.png')
        zipped_path = os.path.join(melody_dir, 'slice' + str(i) + '.png')
        save_slice(i, output_path, img)
        save_slice(i, zipped_path, img)
        logging.info("SLICER: image++ in outputs " + str(time.time() - start_time))
        crop(output_path)
        crop(zipped_path)
        segmented_staves.append(Path(output_path))
    logging.info("SLICER: work completed " + str(time.time() - start_time))
    return segmented_staves
if __name__ == '__main__':
    # Slice() expects a decoded image array (it calls Image.from_array on its
    # argument), not a filename string -- load the image first.
    Slice(cv2.imread(r"C:\Users\aroue\Downloads\Documents\@ML\Sheet Music\goodsheet\pgws.png"))
| {
"alphanum_fraction": 0.6789871944,
"author": null,
"avg_line_length": 36.1684210526,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "53aa9726c27eb456e00042c3c50f66df93757a7f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2eed2bc068c84ba262f715a94de58db355966921",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Toga-Party/Notable-Server",
"max_forks_repo_path": "segmenter/slicer.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "2eed2bc068c84ba262f715a94de58db355966921",
"max_issues_repo_issues_event_max_datetime": "2021-04-01T14:05:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-04-01T14:05:36.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Toga-Party/Notable-Server",
"max_issues_repo_path": "segmenter/slicer.py",
"max_line_length": 90,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "2eed2bc068c84ba262f715a94de58db355966921",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Toga-Party/Notable-Server",
"max_stars_repo_path": "segmenter/.ipynb_checkpoints/slicer-checkpoint.py",
"max_stars_repo_stars_event_max_datetime": "2021-03-06T11:34:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-06T11:34:57.000Z",
"num_tokens": 860,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3436
} |
function d = divisor(n)
%% divisor : list all integer divisors of a number.
% divisor(n) returns a row vector containing every distinct divisor of the
% positive integer N, including 1 and N itself.
%
% Remark:
%   Relies on Matlab's built-in factor() routine, so inputs are limited to
%   the values factor() accepts (up to 2^32 at the time of writing). Should
%   factor() gain support for larger integers, this function follows suit.
%   Building divisors from the prime factorisation is considerably faster
%   than trial division over 1..n.
%
% Example:
%   a = divisor(12);   % a = [1, 2, 3, 4, 6, 12]
%
% See Also:
%   factor, primes

%% Validate input: must be a scalar positive integer.
if ~isscalar(n)
    error('divisor:NonScalarInput','Input must be a scalar.');
end
if (n < 1) || (floor(n) ~= n)
    error('divisor:PositiveIntegerOnly', 'Input must be a positive integer.');
end

%% Build divisors from the prime factorisation.
pf = factor(n);          % prime factors, with multiplicity
distinct = unique(pf);   % each distinct prime

d = 1;                   % divisors accumulated so far
for p = distinct
    % Multiply every divisor found so far by each power p^0 .. p^k,
    % where k is this prime's multiplicity (outer product, then flatten).
    powers = p.^(0:sum(pf == p));
    d = d(:) * powers;
    d = reshape(d, 1, []);
end
d = sort(d);             % ascending row vector
"alphanum_fraction": null,
"author": "goGPS-Project",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/goGPS-Project-goGPS_MATLAB/goGPS_MATLAB-30644df61d2459e3347ac5f3e31b71d9f69f4b01/source/utility/divisor.m",
"reason": null,
"repo": "goGPS_MATLAB",
"save_path": "github-repos/MATLAB/goGPS-Project-goGPS_MATLAB",
"sha": "30644df61d2459e3347ac5f3e31b71d9f69f4b01",
"size": null
} |
[STATEMENT]
(* Insertion preserves the tree invariant: if t is a tree over A and the
   pair (x, y) is drawn from A, then ins x y t is again a tree over A.
   Proved by computation induction on ins, splitting on the tree shape. *)
lemma ins_trees: "t \<in> trees A \<Longrightarrow> (x,y) \<in> A \<Longrightarrow> ins x y t \<in> trees A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>t \<in> trees A; (x, y) \<in> A\<rbrakk> \<Longrightarrow> ins x y t \<in> trees A
[PROOF STEP]
by (induction x y t rule: ins.induct)
(auto split: tree.splits simp: ins_neq_Leaf)
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Treaps_Random_Treap",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 146,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import numpy as np
import torch
from torch.utils.data import Dataset
class XORDataset(Dataset):
    """Random bit vectors labelled with their parity (XOR over the last axis)."""

    def __init__(self, length, shape=None):
        """
        Arguments:
            length (int): length of dataset, which equals `len(self)`.
            shape (list, tuple, optional): per-sample shape of the dataset.
                Falls back to `(length, 8)` when empty or not given.
                Default: None.
        """
        full_shape = (length, *shape) if shape else (length, 8)
        bits = np.random.randint(0, 2, full_shape)
        self.data = torch.FloatTensor(bits)
        parity = np.bitwise_xor.reduce(bits, axis=1)
        self.label = torch.tensor(parity).unsqueeze(dim=1).float()

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.data[index], self.label[index]

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.data)
class ExtraXORDataset(XORDataset):
    """XOR dataset that can also return extra random binary values per sample."""

    def __init__(self, length, shape=None, extra_dims=1):
        """
        Arguments:
            length (int): length of dataset, which equals `len(self)`.
            shape (list, tuple, optional): per-sample shape of the dataset.
                Falls back to `(length, 8)` when empty or not given.
                Default: None.
            extra_dims (int, optional): dimension of extra values; 0 disables
                the extras entirely. Default: 1.
        """
        super().__init__(length, shape=shape)
        self.extras = (
            torch.randint(0, 2, (length, extra_dims)) if extra_dims else None
        )

    def __getitem__(self, index):
        """Return [features, label, *extras] (or just the pair without extras)."""
        if self.extras is None:
            return self.data[index], self.label[index]
        sample = [self.data[index], self.label[index]]
        sample.extend(list(self.extras[index]))
        return sample

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.data)
| {
"alphanum_fraction": 0.5778229908,
"author": null,
"avg_line_length": 33.3220338983,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "12c33e67691401db4a1fbebcf192e905a66cd395",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-05-25T20:25:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-05-25T20:25:19.000Z",
"max_forks_repo_head_hexsha": "b0b0ecd28c0578ad1d5ed06d2442f9a2c29c72e2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "AlexGrig/pytorch-lr-finder",
"max_forks_repo_path": "tests/dataset.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "b0b0ecd28c0578ad1d5ed06d2442f9a2c29c72e2",
"max_issues_repo_issues_event_max_datetime": "2020-05-25T20:50:58.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-05-25T20:40:01.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "AlexGrig/pytorch-lr-finder",
"max_issues_repo_path": "tests/dataset.py",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b0b0ecd28c0578ad1d5ed06d2442f9a2c29c72e2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "AlexGrig/pytorch-lr-finder",
"max_stars_repo_path": "tests/dataset.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 452,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1966
} |
module Dice
# NOTE(review): the original source read a bare `module` with no name, which
# is a syntax error in Julia. `Dice` is inferred from the project -- confirm.

const version = "Dice v0.1.0"
const author = "Timo Sarkar"

#includes come here....
#
#
#

# Select the frontend loop from the command-line flags.
if "--uci" ∈ ARGS
    uci_loop()
elseif "--xboard" ∈ ARGS
    xboard_loop()
elseif "--repl" ∈ ARGS
    repl_loop()
end

# Smoke test: make one move on a fresh board, then undo it and re-print.
function test_refactor()
    println()
    b = new_game()
    println()
    printbd(b)
    moves = generate_moves(b)
    move = moves[2]
    println(move)
    prior_castling_rights = b.castling_rights
    prior_last_move_pawn_double_push = b.last_move_pawn_double_push
    make_move!(b, move)  # fix: was `make_move!(n, move)` -- `n` is undefined here
    unmake_move!(b, move, prior_castling_rights,
                 prior_last_move_pawn_double_push)
    printbd(b)
end
# test_refactor()

# Smoke test: inspect the board's move list after playing one move.
function test_movelist()
    b = new_game()
    @show b
    m = generate_moves(b)
    @show m
    make_move!(b, m[1])
    println( b.game_movelist )
end

export WHITE, BLACK
export KING, QUEEN, ROOK, BISHOP, KNIGHT, PAWN
export A, B, C, D, E, F, G, H
export square
export CASTLING_RIGHTS_ALL
export generate_moves, make_move!, unmake_move!
export numner_of_moves  # NOTE(review): likely a typo for number_of_moves; kept -- renaming would break the export
export Board, set!, new_game, new_game_960
export read_fen, write_fen, printbd
export play, random_play_both_side, perft
export best_move_search, best_move_negamax, best_move_alphabeta
end
| {
"alphanum_fraction": 0.7251712329,
"author": null,
"avg_line_length": 19.1475409836,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "3beaab587859c25dfe30ebfb8c54f151972d8991",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-10-10T13:36:49.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-10-10T13:36:49.000Z",
"max_forks_repo_head_hexsha": "ff1af0d1868d2272980f837d76b9eb6f9cc2b1f5",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "dicelanguage/dice",
"max_forks_repo_path": "dice.jl",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "1f1eadaad44b41cd28ec8d327607f7af64b3e4cd",
"max_issues_repo_issues_event_max_datetime": "2020-10-15T09:05:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-10-09T09:33:28.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "timo-cmd2/dice",
"max_issues_repo_path": "dice.jl",
"max_line_length": 66,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "1f1eadaad44b41cd28ec8d327607f7af64b3e4cd",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "timo-cmd2/dice",
"max_stars_repo_path": "dice.jl",
"max_stars_repo_stars_event_max_datetime": "2021-05-10T08:44:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-10T13:36:51.000Z",
"num_tokens": 359,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1168
} |
-- WARNING: This file was generated automatically by Vehicle
-- and should not be modified manually!
-- Metadata
-- - Agda version: 2.6.2
-- - AISEC version: 0.1.0.1
-- - Time generated: ???
open import Data.Unit
module MyTestModule where
e1 : let x = ⊤ in x
e1 = checkProperty record
{ databasePath = DATABASE_PATH
; propertyUUID = ????
} | {
"alphanum_fraction": 0.6780626781,
"author": null,
"avg_line_length": 21.9375,
"converted": null,
"ext": "agda",
"file": null,
"hexsha": "1a242244d333b730622c3c2457329fc5d0d31ea7",
"include": null,
"lang": "Agda",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7cdd7734fe0d50cc7d5a3b3c6bdddba778cfe6df",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Yiergot/vehicle",
"max_forks_repo_path": "examples/simple/let/let-output.agda",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7cdd7734fe0d50cc7d5a3b3c6bdddba778cfe6df",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Yiergot/vehicle",
"max_issues_repo_path": "examples/simple/let/let-output.agda",
"max_line_length": 60,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7cdd7734fe0d50cc7d5a3b3c6bdddba778cfe6df",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Yiergot/vehicle",
"max_stars_repo_path": "examples/simple/let/let-output.agda",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 107,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 351
} |
import numpy as np
from . import utils
from .gaussian import GaussianLikelihood
class PSLikelihood(GaussianLikelihood):
    """Gaussian likelihood over a single unbinned power spectrum."""

    # Class-level defaults, overridable by subclasses / configuration.
    name: str = "TT"
    kind: str = "tt"
    lmax: int = 6000

    def get_requirements(self):
        """Declare that the theory code must supply Cl[self.kind] up to lmax."""
        return {"Cl": {self.kind: self.lmax}}

    def _get_Cl(self):
        # ell_factor=True: request spectra with the multipole prefactor
        # applied -- presumably l(l+1)/2pi; confirm against the provider docs.
        return self.theory.get_Cl(ell_factor=True)

    def _get_theory(self, **params_values):
        """Theory prediction: the requested spectrum truncated at lmax."""
        spectra = self._get_Cl()
        return spectra[self.kind][:self.lmax]
class BinnedPSLikelihood(PSLikelihood):
    """Power-spectrum likelihood with the theory projected through a binning matrix."""

    binning_matrix_path: str = ""

    def initialize(self):
        """Load the binning matrix and derive each bin's effective multipole."""
        self.binning_matrix = self._get_binning_matrix()
        # Effective multipole per bin: the matrix applied to ell = 0..n-1.
        n_ell = self.binning_matrix.shape[1]
        self.bin_centers = self.binning_matrix.dot(np.arange(n_ell))
        super().initialize()

    @classmethod
    def binner(cls, x, y, bin_edges):
        """Delegate to the shared binning utility."""
        return utils.binner(x, y, bin_edges)

    def _get_binning_matrix(self):
        """Read the binning matrix from the configured plain-text file."""
        return np.loadtxt(self.binning_matrix_path)

    def _get_data(self):
        """Return (bin centers, binned data vector loaded from self.datapath)."""
        return self.bin_centers, np.loadtxt(self.datapath)

    def _get_theory(self, **params_values):
        """Binned theory spectrum: binning_matrix @ Cl[:lmax]."""
        unbinned = self._get_Cl()
        return self.binning_matrix.dot(unbinned[self.kind][:self.lmax])
| {
"alphanum_fraction": 0.6605657238,
"author": null,
"avg_line_length": 26.7111111111,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e03fae5b4777557995ead000f1a414d9f4fb978f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2022-03-01T19:05:18.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-20T11:03:32.000Z",
"max_forks_repo_head_hexsha": "e8d92423ba433f15bda3a01463f357647e1ffa8c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "itrharrison/SOLikeT-itrharrison",
"max_forks_repo_path": "soliket/ps.py",
"max_issues_count": 35,
"max_issues_repo_head_hexsha": "e8d92423ba433f15bda3a01463f357647e1ffa8c",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T12:13:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-06-26T06:47:43.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "itrharrison/SOLikeT-itrharrison",
"max_issues_repo_path": "soliket/ps.py",
"max_line_length": 88,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "e8d92423ba433f15bda3a01463f357647e1ffa8c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "itrharrison/SOLikeT-itrharrison",
"max_stars_repo_path": "soliket/ps.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-22T17:31:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-14T17:35:23.000Z",
"num_tokens": 299,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1202
} |
# https://www.hackerrank.com/challenges/np-dot-and-cross/problem
import numpy


def _read_matrix(rows):
    """Read `rows` lines of whitespace-separated ints from stdin as an array."""
    return numpy.array([[int(tok) for tok in input().split()] for _ in range(rows)])


n = int(input())
# Matrix product of the two n x n matrices that follow on stdin.
print(numpy.dot(_read_matrix(n), _read_matrix(n)))
| {
"alphanum_fraction": 0.6956521739,
"author": null,
"avg_line_length": 28.1111111111,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a872cf6b2b7de556a81e068c839bc7769bac3c43",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-03-13T10:08:18.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-03-13T10:08:18.000Z",
"max_forks_repo_head_hexsha": "a2b6ddfdf0cb51ceac84857d566b57c6a5bb4be8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "allenalvin333/Hackerrank_Python",
"max_forks_repo_path": "23.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a2b6ddfdf0cb51ceac84857d566b57c6a5bb4be8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "allenalvin333/Hackerrank_Python",
"max_issues_repo_path": "23.py",
"max_line_length": 66,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a2b6ddfdf0cb51ceac84857d566b57c6a5bb4be8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "allenalvin333/Hackerrank_Python",
"max_stars_repo_path": "23.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 71,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 253
} |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
class Plotting:
    """Base helper: holds a dataframe and opens a wide figure/axes pair."""

    def __init__(self, df: pd.DataFrame) -> None:
        self.df = df

    def plot(self):
        """Create a 24x9 figure whose x axis uses an automatic tick locator."""
        import matplotlib.ticker as plticker

        figure = plt.figure(figsize=(24, 9))
        axes = plt.axes()
        axes.xaxis.set_major_locator(plticker.AutoLocator())
        return figure, axes
class Indicator:
    """Base class: compute a single-series indicator over df.close and draw it."""

    def __init__(self, df: pd.DataFrame, indicator, timeframe) -> None:
        self.df = df
        self.data = self._calc(indicator, timeframe)
        self.legend = []
        self.plot()

    def _calc(self, indicator, timeframe) -> np.ndarray:
        """Apply the indicator callable to the close-price series."""
        close_series = self.df.close
        return indicator(close_series, timeframe)

    def plot(self) -> None:
        """Draw the indicator in cyan against string-formatted index labels."""
        plt.plot(self.df.index.astype(str), self.data, color="c")
class Sma(Indicator):
    """Simple moving average of the close price, drawn on the current figure."""

    def __init__(self, df: pd.DataFrame, timeframe: int) -> None:
        from ta.trend import sma_indicator

        super().__init__(df, sma_indicator, timeframe)
        self.legend = [f"sma_{timeframe}"]
class Ema(Indicator):
    """Exponential moving average of the close price, drawn on the current figure."""

    def __init__(self, df: pd.DataFrame, timeframe: int) -> None:
        from ta.trend import ema_indicator

        super().__init__(df, ema_indicator, timeframe)
        # Underscore added for consistency with Sma's "sma_<n>" legend label
        # (the original produced "ema<n>").
        self.legend = ["ema_" + str(timeframe)]
class Ichimoku:
    """Compute the four Ichimoku lines for a price dataframe and draw them."""

    def __init__(self, df: pd.DataFrame) -> None:
        from ta.trend import IchimokuIndicator
        self.df = df
        self.data = self._calc(IchimokuIndicator)  # NOTE: _calc returns None; the components are stored on self
        self.legend = ["conv_line", "base_line", "leadspan_a", "leadspan_b"]
        self.plot()

    def _calc(self, indicator) -> None:
        # High/low inputs are the per-row max and min over all four OHLC
        # columns rather than the high/low columns directly -- presumably to
        # tolerate inconsistent OHLC data; TODO confirm intent.
        ichimoku = indicator(
            self.df[["open", "high", "low", "close"]].max(axis=1),
            self.df[["open", "high", "low", "close"]].min(axis=1),
        )
        self.conv_line = ichimoku.ichimoku_conversion_line()
        self.base_line = ichimoku.ichimoku_base_line()
        self.leadspan_a = ichimoku.ichimoku_a()
        self.leadspan_b = ichimoku.ichimoku_b()

    def plot(self) -> None:
        """Draw the four lines, the shifted close, and the cloud fill."""
        # The four legend labels map to the first four plot calls below; the
        # shifted close (plum) and the cloud fill stay unlabeled.
        plt.plot(self.df.index.astype(str), self.conv_line, color="darkorange")
        plt.plot(self.df.index.astype(str), self.base_line, color="dodgerblue")
        # Leading spans shifted forward 26 periods form the cloud boundaries.
        plt.plot(self.df.index.astype(str), self.leadspan_a.shift(26), color="green")
        plt.plot(self.df.index.astype(str), self.leadspan_b.shift(26), color="red")
        # Close shifted back 26 periods (lagging-span-style line).
        plt.plot(self.df.index.astype(str), self.df.close.shift(-26), color="plum")
        plt.fill_between(
            self.df.index.astype(str),
            self.leadspan_a.shift(26).values,
            self.leadspan_b.shift(26).values,
            alpha=0.5,
            color="lightblue",
        )
        plt.legend(self.legend)
class Macd(Plotting):
    """Compute and plot the MACD indicator.

    Args:
        df: dataframe with a ``close`` column.
        timeframe_short: short window of the MACD line.
        timeframe_long: long window of the MACD line.
        timeframe_signal: window of the signal line.
    """

    def __init__(
        self,
        df: pd.DataFrame,
        timeframe_short=12,
        timeframe_long=26,
        timeframe_signal=9,
    ) -> None:
        from ta.trend import MACD

        super().__init__(df)
        # _calc stores its results on self and returns None, so self.data
        # remains None (kept for structural parity with the Indicator classes).
        self.data = self._calc(MACD, timeframe_short, timeframe_long, timeframe_signal)
        self.legend = []

    def _calc(self, indicator, timeframe_short, timeframe_long, timeframe_signal):
        """Compute the MACD, signal, and histogram series onto self."""
        computed = indicator(
            self.df.close, timeframe_short, timeframe_long, timeframe_signal
        )
        self.macd = computed.macd()
        self.macd_signal = computed.macd_signal()
        self.macd_diff = computed.macd_diff()

    def plot(self):
        """Draw MACD (green) and signal (red) lines plus the histogram bars."""
        fig, axes = super().plot()
        axes.plot(self.macd.index.astype(str), self.macd.values, color="green")
        axes.plot(
            self.macd_signal.index.astype(str), self.macd_signal.values, color="red"
        )
        axes.bar(
            self.macd_diff.index.astype(str), self.macd_diff.values, color="lightblue"
        )
        axes.legend(["macd", "macd_signal", "macd_diff"])
        return axes
class RSI(Plotting):
    """Relative Strength Index plot with 30/70 guide lines."""

    def __init__(self, df, rsi=None) -> None:
        super().__init__(df)
        if rsi is None:
            self._calc()
        else:
            self.rsi = rsi

    def _calc(self, timeframe=14):
        """Compute the RSI of the close column over *timeframe* periods."""
        from ta.momentum import rsi

        self.rsi = rsi(self.df["close"], window=timeframe)

    def plot(self):
        """Draw the RSI curve plus overbought (70) / oversold (30) lines."""
        fig, axes = super().plot()
        axes.plot(self.df.index.astype(str), self.rsi, color="cyan")
        axes.legend(["rsi"])
        axes.axhline(y=70)
        axes.axhline(y=30)
        return axes
| {
"alphanum_fraction": 0.6019375673,
"author": null,
"avg_line_length": 29.9677419355,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8df8ba6eb2208f6647767bd925fa4695915a3f1c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6e833e3e0924073988d2bbf049fbbec03e3f7596",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "t4skmanag3r/algotrade",
"max_forks_repo_path": "algotrade/ploting.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6e833e3e0924073988d2bbf049fbbec03e3f7596",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "t4skmanag3r/algotrade",
"max_issues_repo_path": "algotrade/ploting.py",
"max_line_length": 87,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6e833e3e0924073988d2bbf049fbbec03e3f7596",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "t4skmanag3r/algotrade",
"max_stars_repo_path": "algotrade/ploting.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1175,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4645
} |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import torch.nn as nn
import torch
import torch.nn.functional as F
import math
from .torchdiffeq import odeint, odeint_adjoint
class ode_func_class(torch.nn.Module):
    """ODE dynamics wrapper.

    Concatenates the conditioning code (and, when time_dependent, the scalar
    time broadcast to one column) onto the state, then evaluates the
    decoder's patch network on the result.
    """

    def __init__(self, decoder, time_dependent=False):
        super().__init__()
        self.decoder = decoder
        self.time_dependent = time_dependent

    def forward(self, t, y, conditioning):
        if self.time_dependent:
            t_column = t.repeat(y.shape[0]).view(-1, 1)
            net_input = torch.cat([conditioning, y, t_column], dim=1)
        else:
            net_input = torch.cat([conditioning, y], dim=1)
        return self.decoder.patch_network_forward(net_input, ode=False)
class RegularizedODEfunc(torch.nn.Module):
    """Augment an ODE dynamics function with regularization outputs.

    forward() returns (dy/dt,) + one scalar per regularization function, so
    the ODE solver integrates each penalty along the trajectory together
    with the state.
    """
    # based on https://github.com/rtqichen/ffjord/blob/bce4d2def767f2b9a3288ae0b5d43781ad4dc6b1/lib/layers/wrappers/cnf_regularization.py

    def __init__(self, odefunc, regularization_fns):
        super(RegularizedODEfunc, self).__init__()
        self.odefunc = odefunc
        # Each entry is a (name, weight, func) triple (see the unpacking in
        # forward below).
        self.regularization_fns = regularization_fns
        self.num_regularizations = len(self.regularization_fns)

    def forward(self, t, x, conditioning):
        # SharedContext lets the regularization functions cache expensive
        # intermediates (e.g. a sampled vector-Jacobian product) within one
        # solver step.
        class SharedContext(object):
            pass

        assert len(conditioning) == 1
        conditioning = conditioning[0]
        with torch.enable_grad():
            # x is (state, *running_penalty_integrals); gradients are needed
            # because the penalties differentiate through y wrt the state.
            [state.requires_grad_(True) for state in x]
            input_states = x[:-self.num_regularizations]
            assert len(input_states) == 1
            input_states = input_states[0]
            y = self.odefunc(t, input_states, conditioning)
            reg_states = tuple(func(input_states, conditioning, y, SharedContext) for name, weight, func in self.regularization_fns)
            return (y,) + reg_states
def _batch_root_mean_squared(tensor):
tensor = tensor.view(tensor.shape[0], -1)
return torch.mean(torch.norm(tensor, p=2, dim=1) / tensor.shape[1]**0.5)
def l1_regularzation_fn(input_states, conditioning, y, unused_context):
    """L1 penalty: mean absolute value of the dynamics output *y*.

    (The "regularzation" typo in the name is kept: callers refer to it.)
    """
    del input_states, conditioning
    return y.abs().mean()
def l2_regularzation_fn(input_states, conditioning, y, unused_context):
    """L2 penalty: batch root-mean-square of the dynamics output *y*.

    (The "regularzation" typo in the name is kept to avoid breaking callers.)
    """
    del input_states, conditioning
    return _batch_root_mean_squared(y)
def directional_l2_regularization_fn(input_states, conditioning, y, unused_context):
    """Penalize the directional derivative of y along itself.

    Computes the Jacobian-vector product (dy/dx) · y via autograd and
    returns its batch RMS.
    """
    directional_dx = torch.autograd.grad(y, input_states, y, create_graph=True)[0]
    return _batch_root_mean_squared(directional_dx)
def jacobian_frobenius_regularization_fn(input_states, conditioning, y, context):
    """Penalty from the explicit minibatch Jacobian dy/dx (batch RMS).

    The Jacobian is cached on *context* so sibling regularizers evaluated in
    the same solver step can reuse it.
    """
    if hasattr(context, "jac"):
        jac = context.jac
    else:
        jac = _get_minibatch_jacobian(y, input_states)
        context.jac = jac
    return _batch_root_mean_squared(jac)
def divergence_approx(input_states, conditioning, y, context, as_loss=True):
    # avoids explicitly computing the Jacobian
    """Stochastic estimate of the divergence (trace of dy/dx) per sample.

    Uses a single random probe e: summing e * (J^T e) per sample is a
    Hutchinson-style trace estimate. The probe and its vector-Jacobian
    product are cached on *context* for reuse by other regularizers in the
    same solver step.

    Args:
        as_loss: if True, take the absolute value per sample so that both
            positive and negative divergence are pushed toward zero.
    """
    del conditioning
    if hasattr(context, "e_dydx"):
        e = context.e
        e_dydx = context.e_dydx
    else:
        e = torch.randn_like(y)
        e_dydx = torch.autograd.grad(y, input_states, e, create_graph=True)[0]
        context.e = e
        context.e_dydx = e_dydx
    e_dydx_e = e_dydx * e
    approx_tr_dydx = e_dydx_e.view(y.shape[0], -1).sum(dim=1)
    if as_loss: # want to push positive and negative divergence to zero
        approx_tr_dydx = torch.abs(approx_tr_dydx)
    return torch.mean(approx_tr_dydx)
def jacobian_frobenius_approx(input_states, conditioning, y, context):
    # avoids explicitly computing the Jacobian. see https://arxiv.org/pdf/2002.02798.pdf "How to train your Neural ODE" for more
    """Stochastic estimate of the Jacobian norm from one probe vector.

    Returns the mean norm of the vector-Jacobian product J^T e with a random
    probe e; the probe and product are shared via *context* with
    divergence_approx when both regularizers are active.
    """
    del conditioning
    if hasattr(context, "e_dydx"):
        e = context.e
        e_dydx = context.e_dydx
    else:
        e = torch.randn_like(y)
        e_dydx = torch.autograd.grad(y, input_states, e, create_graph=True)[0]
        context.e = e
        context.e_dydx = e_dydx
    approx_jac_frob = torch.norm(e_dydx, p=2, dim=-1)
    return torch.mean(approx_jac_frob)
def _get_minibatch_jacobian(y, x, create_graph=False):
    """Computes the Jacobian of y wrt x assuming minibatch-mode.

    Args:
        y: (N, ...) with a total of D_y elements in ...
        x: (N, ...) with a total of D_x elements in ...
    Returns:
        The minibatch Jacobian matrix of shape (N, D_y, D_x)

    NOTE(review): the `create_graph` parameter is currently ignored -- the
    grad call below hard-codes create_graph=True. Confirm whether callers
    depend on the differentiable Jacobian before honoring the parameter.
    """
    assert y.shape[0] == x.shape[0]
    y = y.view(y.shape[0], -1)
    # Compute Jacobian row by row: one backward pass per output column.
    jac = []
    for j in range(y.shape[1]):
        dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,
                                      create_graph=True)[0].view(x.shape[0], -1)
        jac.append(torch.unsqueeze(dy_j_dx, 1))
    jac = torch.cat(jac, 1)
    return jac
class Decoder(nn.Module):
def __init__(
    self,
    patch_latent_size,
    dims,
    dropout=None,
    dropout_prob=0.0,
    norm_layers=(),
    latent_in=(),
    weight_norm=False,
    xyz_in_all=None,
    use_tanh=False,
    latent_dropout=False,
    patch_encoder=None,
    do_code_regularization=True,
    # mixture
    num_patches=1,
    mixture_latent_size=None,
    non_variable_patch_radius=None,
    use_rotations=True,
    pull_patches_to_uncovered_surface=False,
    pull_free_space_patches_to_surface=False,
    loss_on_patches_instead_of_mixture=False,
    align_patch_rotation_with_normal=False,
    weight_threshold=None,
    train_patch_network=True,
    train_object_to_patch=True,
    patch_network_pretrained_path=None,
    results_folder=None,
    script_mode=False,
    mixture_latent_mode="all_implicit",
    posrot_latent_size=None,
    variable_patch_scaling=False,
    keep_scales_small=False,
    scales_low_variance=False,
    mixture_to_patch_parameters=None,
    use_depth_encoder=False,
    use_tiny_patchnet=False,
    positional_encoding=False,
    pretrained_depth_encoder_weights=None,
    use_curriculum_weighting=False,
    minimum_scale=0.0,
    maximum_scale=1000.,
    use_ode=False,
    time_dependent_ode=False,
    device=None
):
    """Build the patch-mixture SDF decoder.

    Constructs (depending on flags):
      * the per-patch SDF MLP ("patch_lin<i>" layers, or a weight-free
        "tiny patchnet" whose weights live in the latent code),
      * an optional PointNet-style patch encoder ("patch_encoder_lin<i>"),
      * an optional depth-map encoder (SeleCSLS backbone),
      * the object-latent -> patch-parameter network ("object_to_patch_FC_<i>"),
        whose final layer is bias-initialized so that patches start at
        sensible positions/scales.

    Args:
        patch_latent_size: size of each patch's latent code.
        dims: hidden layer widths of the patch SDF network.
        dropout / dropout_prob / norm_layers / latent_in / weight_norm /
            xyz_in_all / latent_dropout: DeepSDF-style network options.
        mixture_latent_mode: one of "all_implicit", "all_explicit",
            "patch_explicit_meta_implicit".
        use_ode: if True the patch network parameterizes an ODE velocity
            field instead of directly regressing SDF values.

    NOTE(review): `use_tanh` and `device` are accepted but not referenced in
    this constructor as visible here; `self.th` is always created.
    """
    super(Decoder, self).__init__()

    def make_sequence():
        return []

    self.use_ode = use_ode
    self.time_dependent_ode = time_dependent_ode
    self.positional_encoding = positional_encoding
    self.positional_encoding_frequencies = 3 # this is "L" in Neural Radiance Fields. NeRF uses L = 4.
    self.use_tiny_patchnet = use_tiny_patchnet
    # xyzt for a time-dependent ODE, otherwise xyz
    original_coordinate_size = 4 if use_ode and time_dependent_ode else 3
    # positional encoding expands each coordinate into 2L sin/cos features
    coordinate_size = original_coordinate_size*2*self.positional_encoding_frequencies if positional_encoding else original_coordinate_size
    # tiny patchnets carry their weights in the latent, so the MLP input is coordinates only;
    # ODE mode outputs a 3D velocity, plain mode outputs a scalar SDF
    self.dims = dims = [(0 if use_tiny_patchnet else patch_latent_size) + coordinate_size] + dims + [3 if self.use_ode else 1]
    self.num_layers = len(dims)
    self.norm_layers = norm_layers
    self.latent_in = latent_in
    self.latent_dropout = latent_dropout
    if self.latent_dropout:
        self.lat_dp = nn.Dropout(0.2)
    self.xyz_in_all = xyz_in_all
    self.weight_norm = weight_norm
    self.do_code_regularization = do_code_regularization
    self.use_curriculum_weighting = use_curriculum_weighting
    self.num_patches = num_patches
    self.mixture_latent_mode = mixture_latent_mode
    self.posrot_latent_size = posrot_latent_size
    self.non_variable_patch_radius = non_variable_patch_radius
    self.variable_patch_scaling = variable_patch_scaling
    self.minimum_scale = minimum_scale
    self.maximum_scale = maximum_scale
    self.keep_scales_small = keep_scales_small
    self.weight_threshold = weight_threshold
    self.use_rotations = use_rotations
    self.pull_patches_to_uncovered_surface = pull_patches_to_uncovered_surface
    self.pull_free_space_patches_to_surface = pull_free_space_patches_to_surface
    self.loss_on_patches_instead_of_mixture = loss_on_patches_instead_of_mixture
    self.align_patch_rotation_with_normal = align_patch_rotation_with_normal
    self.scales_low_variance = scales_low_variance
    self.script_mode = script_mode
    self.results_folder = results_folder

    # Patch SDF network layers (DeepSDF-style), skipped at runtime when
    # use_tiny_patchnet is set (weights then come from the latent code).
    for layer in range(0, self.num_layers - 1):
        if layer + 1 in latent_in:
            # leave room for the latent+coords re-injected at the next layer
            out_dim = dims[layer + 1] - dims[0]
        else:
            out_dim = dims[layer + 1]
            if self.xyz_in_all and layer != self.num_layers - 2:
                # leave room for the coordinates appended at the next layer
                out_dim -= coordinate_size
        print(layer, dims[layer], out_dim)
        if weight_norm and layer in self.norm_layers:
            setattr(
                self,
                "patch_lin" + str(layer),
                nn.utils.weight_norm(nn.Linear(dims[layer], out_dim)),
            )
        else:
            setattr(self, "patch_lin" + str(layer), nn.Linear(dims[layer], out_dim))
        if (
            (not weight_norm)
            and self.norm_layers is not None
            and layer in self.norm_layers
        ):
            setattr(self, "patch_bn" + str(layer), nn.LayerNorm(out_dim))

    self.relu = nn.ReLU()
    self.elu = nn.ELU()
    self.softplus = nn.Softplus()
    self.dropout_prob = dropout_prob
    self.dropout = dropout
    self.th = nn.Tanh()
    self.patch_latent_size = patch_latent_size
    self.mixture_latent_size = mixture_latent_size

    # Optional depth-map encoder that regresses the object latent from a
    # rendered depth image (400x400, replicated to 3 channels).
    self.use_depth_encoder = use_depth_encoder
    if self.use_depth_encoder:
        from networks.selecsls import Net as depth_encoder
        self.depth_encoder = depth_encoder(nClasses=self.mixture_latent_size)
        weights = pretrained_depth_encoder_weights
        pretrained_weights = torch.load(pretrained_depth_encoder_weights)
        # drop the classification head: it is replaced by the latent regressor
        del pretrained_weights["classifier.0.weight"]
        del pretrained_weights["classifier.0.bias"]
        current_weights = self.depth_encoder.state_dict()
        current_weights.update(pretrained_weights)
        self.depth_encoder.load_state_dict(current_weights)

    # Optional PointNet-style patch encoder: FC layers on (xyz, sdf) samples.
    self.patch_encoder = patch_encoder
    if patch_encoder is not None:
        in_channels = 4 # xyz + SDF
        for i, (layer_type, param, activation) in enumerate(patch_encoder["layers"]):
            if layer_type == "FC":
                out_channels, weight_norm = param
                if out_channels <= 0: # special case: this is the last layer. it outputs the patch latent vector
                    out_channels = patch_latent_size
                if weight_norm:
                    setattr(
                        self,
                        "patch_encoder_lin" + str(i),
                        nn.utils.weight_norm(nn.Linear(in_channels, out_channels)),
                    )
                else:
                    setattr(self, "patch_encoder_lin" + str(i), nn.Linear(in_channels, out_channels))
                in_channels = out_channels

    # Object-latent -> patch-parameter network. Not needed when patch
    # parameters are fully explicit in the latent ("all_explicit").
    if mixture_to_patch_parameters is None or mixture_latent_mode == "all_explicit":
        self.mixture_to_patch_parameters = None
    else:
        self.mixture_to_patch_parameters = self._parse_mixture_to_patch_parameter_string(mixture_to_patch_parameters)

        import numpy as np
        def _initial_patch_center():
            # uniformly random direction on a sphere of radius 0.5
            random_point = np.random.normal(size=(3,))
            radius = 0.5
            return radius * random_point / np.linalg.norm(random_point)

        # Determine the output size and bias initialization of the final FC
        # layer so that the regressed patch metadata starts at sane values.
        if self.mixture_latent_mode == "all_implicit":
            if self.num_patches > 1:
                in_channels = self.mixture_latent_size
                # per patch: latent + position(3) + rotation(3) + optional scale(1)
                final_channels = self.num_patches * (self.patch_latent_size + 3 + 3 + (1 if self.variable_patch_scaling else 0))
                final_bias = np.concatenate([np.concatenate([np.zeros(self.patch_latent_size), _initial_patch_center(), np.zeros(3), (np.array([self.non_variable_patch_radius]) if self.variable_patch_scaling else np.zeros(0))]) for _ in range(self.num_patches)])
                use_precomputed_bias_init = True
                if use_precomputed_bias_init:
                    # See readme.txt
                    #sdf_filename = "shapenetv1/deepsdf_preprocessed/SdfSamples/ShapeNetV1/02691156/1b7ac690067010e26b7bd17e458d0dcb.obj.npz" # airplane
                    #sdf_filename = "shapenetv1/deepsdf_preprocessed/SdfSamples/ShapeNetV1/04256520/1731d53ab8b8973973800789ccff9705.obj.npz" # sofa
                    # NOTE(review): `sdf_filename` is only defined in the
                    # commented-out lines above — this branch raises NameError
                    # unless one of them is uncommented. TODO confirm intended.
                    patch_latent_size = self.patch_latent_size
                    num_patches = self.num_patches
                    surface_sdf_threshold = 0.02
                    final_scaling_increase_factor = 1.2
                    initialization_file = sdf_filename + "_init_" + str(patch_latent_size) + "_" + str(num_patches) + "_" + str(surface_sdf_threshold) + "_" + str(final_scaling_increase_factor) + ("_tiny" if self.use_tiny_patchnet else "") + ".npy" # generated by train_deep_sdf.py
                    final_bias = np.load(initialization_file).reshape(final_channels)
                    # overwrite the latent part of each patch's bias with the
                    # standard initial latent (tiny-patchnet weights or zeros)
                    initial_patch_latent = self._initialize_tiny_patchnet() if self.use_tiny_patchnet else np.zeros(self.patch_latent_size)
                    for i in range(num_patches):
                        metadata_size = 7
                        offset = (patch_latent_size + metadata_size) * i
                        final_bias[offset : offset + patch_latent_size] = initial_patch_latent
            else: # single patch
                in_channels = self.mixture_latent_size
                final_channels = self.patch_latent_size
                final_bias = self._initialize_tiny_patchnet() if self.use_tiny_patchnet else np.zeros(self.patch_latent_size)
        elif self.mixture_latent_mode == "patch_explicit_meta_implicit":
            in_channels = self.posrot_latent_size
            # only position/rotation/scale are regressed; patch latents are explicit
            final_channels = self.num_patches * (3 + 3 + (1 if self.variable_patch_scaling else 0))
            final_bias = np.concatenate([np.concatenate([_initial_patch_center(), np.zeros(3), (np.array([0.3]) if self.variable_patch_scaling else np.zeros(0))]) for _ in range(self.num_patches)])
        else:
            raise RuntimeError("mixture_latent_mode and mixture_to_patch_parameters combination not supported")

        # Instantiate the FC stack; the final layer starts near-zero with the
        # precomputed bias so initial patch metadata equals `final_bias`.
        # NOTE: the loop variable `dropout` shadows the constructor parameter.
        for i, (layer_type, (out_channels, weight_norm), activation, dropout) in enumerate(self.mixture_to_patch_parameters["layers"]):
            if layer_type == "FC":
                is_final_layer = out_channels == -1
                if is_final_layer:
                    out_channels = final_channels
                layer = nn.Linear(in_channels, out_channels)
                if is_final_layer:
                    layer.weight = nn.Parameter(layer.weight * 0.0001)
                    layer.bias = nn.Parameter(torch.tensor(torch.from_numpy(final_bias)).float().clone().detach())
                if weight_norm:
                    layer = nn.utils.weight_norm(layer)
                setattr(self, "object_to_patch_FC_" + str(i), layer)
                if dropout > 0.:
                    setattr(self, "object_to_patch_dropout_" + str(i), nn.Dropout(dropout))
                in_channels = out_channels
            else:
                raise RuntimeError("unknown layer type: " + str(layer_type))

    self._init_patch_network_training(train_patch_network, patch_network_pretrained_path, results_folder)

    # Freeze/unfreeze sub-networks by parameter-name prefix.
    for name, weight in self.named_parameters():
        if "patch_" in name: # needs to be before object due to object_to_patch
            weight.requires_grad = train_patch_network
        if "object_" in name:
            weight.requires_grad = train_object_to_patch
        if "depth_encoder" in name:
            weight.requires_grad = train_object_to_patch
        print(name, weight.requires_grad)
def _init_patch_network_training(self, train_patch_network, patch_network_pretrained_path=None, results_folder=None):
    """Load pretrained patch-network weights, caching a backup copy first.

    On the first call, the pretrained checkpoint plus its defining sources
    (decoder code, specs, workspace, full backup folder) are copied into
    `<results_folder>/backup/pretrained_patch_network/` and a COPY_DONE
    marker is written; later calls reuse the cached copy. Weights whose
    (de-prefixed) names match this model's state dict are then loaded.
    """
    if patch_network_pretrained_path == "":
        patch_network_pretrained_path = None
    if patch_network_pretrained_path is None:
        return

    import os
    if results_folder is None:
        # default to the grandparent folder of this source file
        this_file = os.path.realpath(__file__)
        results_folder = os.path.split(os.path.split(this_file)[0])[0] + "/"
    copy_folder = results_folder + "backup/pretrained_patch_network/"
    marker_file = copy_folder + "COPY_DONE"

    if not os.path.exists(marker_file):
        import pathlib
        import shutil
        pathlib.Path(copy_folder).mkdir(parents=True, exist_ok=True)
        root_pretrained = os.path.split(os.path.split(patch_network_pretrained_path)[0])[0] + "/"
        # snapshot checkpoint and the code that produced it
        shutil.copyfile(patch_network_pretrained_path, copy_folder + "weight.pth")
        shutil.copyfile(root_pretrained + "backup/networks/deep_sdf_decoder.py", copy_folder + "deep_sdf_decoder.py")
        shutil.copyfile(root_pretrained + "backup/specs.json", copy_folder + "specs.json")
        shutil.copyfile(root_pretrained + "backup/deep_sdf/workspace.py", copy_folder + "workspace.py")
        shutil.copytree(root_pretrained + "backup/", copy_folder + "backup/")
        # marker makes the snapshot step idempotent
        with open(marker_file, "w"):
            pass

    pretrained_weights = torch.load(copy_folder + "weight.pth")["model_state_dict"]
    current_weights = self.state_dict()
    # checkpoints come from DataParallel training: strip the "module." prefix
    # and keep only entries that exist in this model
    strip = len("module.")
    pretrained_weights = {name[strip:]: tensor for name, tensor in pretrained_weights.items() if name[strip:] in current_weights}
    current_weights.update(pretrained_weights)
    self.load_state_dict(current_weights)
def _parse_mixture_to_patch_parameter_string(self, parameter_string):
layers = []
for layer in parameter_string.split(","):
out_channels, weight_norm, activation, dropout = layer.strip().split(" ")
out_channels = int(out_channels)
if weight_norm != "wn" and weight_norm != "nown":
raise RuntimeError("invalid parameter string for mixture-to-patch network: " + str(weight_norm))
weight_norm = weight_norm == "wn"
dropout = float(dropout)
activation = activation.strip().lower()
layers.append(("FC", (out_channels, weight_norm), activation, dropout))
return {"layers": layers}
def _initialize_tiny_patchnet(self):
import numpy as np
original_coordinate_size = 4 if self.use_ode and self.time_dependent_ode else 3
coordinate_size = original_coordinate_size * 2 * self.positional_encoding_frequencies if self.positional_encoding else original_coordinate_size
weights = []
for layer in range(0, len(self.dims) - 1):
input_dim = self.dims[layer]
output_dim = self.dims[layer+1]
if layer in self.latent_in:
input_dim += coordinate_size
matrix = torch.empty((output_dim, input_dim), dtype=torch.float32)
bias = torch.empty((output_dim), dtype=torch.float32)
import math
torch.nn.init.kaiming_uniform_(matrix, a=math.sqrt(5), nonlinearity="relu")
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(matrix)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(bias, -bound, bound)
if layer == len(self.dims) - 2:
matrix /= 1000.
bias /= 1000.
weights.append(matrix)
weights.append(bias)
initial_patch_latent = np.concatenate([weight.detach().reshape(-1).numpy() for weight in weights], axis=0)
print("the patch latent size for tiny PatchNets is: " + str(initial_patch_latent.size), flush=True)
return initial_patch_latent
def _convert_euler_to_matrix(self, angles):
# angles: N x 3
sine = torch.sin(angles)
cosine = torch.cos(angles)
sin_alpha, sin_beta, sin_gamma = sine[:,0], sine[:,1], sine[:,2]
cos_alpha, cos_beta, cos_gamma = cosine[:,0], cosine[:,1], cosine[:,2]
R00 = cos_gamma*cos_beta
R01 = -sin_gamma*cos_alpha + cos_gamma*sin_beta*sin_alpha
R02 = sin_gamma*sin_alpha + cos_gamma*sin_beta*cos_alpha
R10 = sin_gamma*cos_beta
R11 = cos_gamma*cos_alpha + sin_gamma*sin_beta*sin_alpha
R12 = -cos_gamma*sin_alpha + sin_gamma*sin_beta*cos_alpha
R20 = -sin_beta
R21 = cos_beta*sin_alpha
R22 = cos_beta*cos_alpha
R0 = torch.stack([R00, R01, R02], 1) # first row
R1 = torch.stack([R10, R11, R12], 1) # second row
R2 = torch.stack([R20, R21, R22], 1) # third row
R = torch.stack([R0, R1, R2], 1) # shape: (batch_size, row, column)
return R
def patch_network_forward(self, input, ode=None, ode_evaluation_times=[0., 1.], ode_return_pos=False, extra_loss=None):
    """Evaluate the per-patch network on a batch of (latent, coordinate) rows.

    Args:
        input: tensor of shape samples x (patch_latent_size + coords) — the
            coordinate part is the trailing 3 (or 4 for a time-dependent ODE)
            columns.
        ode: overrides self.use_ode when not None.
        ode_evaluation_times: integration times for the ODE solver.
            NOTE(review): mutable default argument — read-only here, but
            fragile if a caller ever mutates it.
        ode_return_pos: if True (and in ODE mode) also return the integrated
            point trajectories.
        extra_loss: dict that receives ODE regularization losses; must be
            provided when ODE regularization is active.

    Returns:
        samples x 1 tensor (SDF-like values), plus the ODE trajectory when
        ode_return_pos is set in ODE mode.
    """
    original_coordinate_size = 4 if self.use_ode and self.time_dependent_ode else 3
    xyz = input[:, -original_coordinate_size:] # samples x 3
    device = xyz.get_device()
    if ode is None:
        ode = self.use_ode
    if ode:
        # Integrate the points through a learned velocity field; the final
        # z-coordinate acts as signed distance to a flat canonical patch.
        atol = 1e-5
        rtol = 1e-5
        max_num_steps = 1000
        total_remaining_forced_step = 10
        ode_func = ode_func_class(self, self.time_dependent_ode)
        conditioning = input[:, :-3] # do not use original_coordinate_size here, since this is input from the outside, not from the ode solver
        xyz = xyz[:, -3:] # do not use original_coordinate_size here
        self.regularize_ode = True
        regularize = self.training and self.regularize_ode
        if regularize:
            # (name, loss weight, regularizer) triples integrated alongside the state
            regularization_fns = [ ("velocity_L2", 0.001, l2_regularzation_fn), \
                ("jacobian_frobenius", 0.01, jacobian_frobenius_approx),\
                ("divergence", 0.001, divergence_approx), \
                ]
            ode_func = RegularizedODEfunc(ode_func, regularization_fns)
            # append one scalar accumulator per regularizer to the ODE state
            xyz = (xyz,) + tuple(torch.tensor(0).to(xyz) for _ in range(len(regularization_fns)))
            # huge tolerances: the accumulators must not influence step-size control
            rtol = [rtol] + [1e20] * len(regularization_fns)
            atol = [atol] + [1e20] * len(regularization_fns)
        use_adjoint = True
        method = "dopri5" # "dopri5" or "rk4"
        if method == "dopri5":
            options = {"total_remaining_forced_step": total_remaining_forced_step, "max_num_steps": max_num_steps}
        elif method == "rk4":
            options = {"step_size": 0.25}
        else:
            options = None
        evaluation_times = torch.tensor(ode_evaluation_times, device=device)
        odeint_func = odeint_adjoint if use_adjoint else odeint
        ode_pos = odeint_func(ode_func, xyz, conditioning, evaluation_times, rtol=rtol, atol=atol, method=method, options=options) # evaluation_times x samples x xyz
        if regularize:
            ode_returned = ode_pos
            ode_regularization_losses = ode_returned[1:]
            assert extra_loss is not None
            for (name, weight, func), loss in zip(regularization_fns, ode_regularization_losses):
                extra_loss[name] = weight * loss[-1] # take loss at the last timestep, which should be the right thing to do to get the full loss
            ode_pos = ode_returned[0]
        x = ode_pos[-1,:,2].reshape(-1,1) # take z coordinate as distance to flat patch
    else:
        if self.positional_encoding:
            # NeRF-style encoding: replace xyz with sin/cos features at L frequencies
            wrap_around = False
            coordinate_size = original_coordinate_size * 2 * self.positional_encoding_frequencies
            if not wrap_around:
                xyz = xyz / 2. # scale [-1,+1] to [-0.5,+0.5]
            xyz = math.pi * xyz # samples x 3
            xyz = xyz.view(-1, original_coordinate_size, 1).repeat(1, 1, self.positional_encoding_frequencies) # samples x 3 x L
            xyz *= 2**torch.arange(self.positional_encoding_frequencies, device=device).float().view(1, 1, -1) # samples x 3 x L
            xyz = torch.cat([torch.sin(xyz), torch.cos(xyz)], dim=-1) # samples x 3 x 2L
            xyz = xyz.view(-1, original_coordinate_size * 2 * self.positional_encoding_frequencies) # samples x 3*2L
            input = torch.cat([input[:,:-original_coordinate_size], xyz], dim=1)
        else:
            coordinate_size = original_coordinate_size
        if input.shape[1] > coordinate_size and self.latent_dropout:
            latent_vecs = input[:, :-coordinate_size]
            latent_vecs = F.dropout(latent_vecs, p=0.2, training=self.training)
            x = torch.cat([latent_vecs, xyz], 1)
        else:
            x = input
        if self.use_tiny_patchnet:
            # tiny patchnet:
            # latent vector has parameters for len(self.dims)-1 many layers (this is len(dims)+1 when looking at specs.json).
            # each layer has (input+1)*output many weights (including a bias). the order is: matrix, then bias.
            # the coordinate (xyz or positional encoding) is fed into the first and fourth layer.
            x = input[:, -coordinate_size:] # samples x input_dim
            latents = input[:, :-coordinate_size] # samples x latent
            latent_offset = 0
            for layer in range(0, self.num_layers - 1):
                input_dim = self.dims[layer]
                output_dim = self.dims[layer+1]
                if layer in self.latent_in:
                    # re-inject the coordinates at this layer
                    input_dim += coordinate_size
                    x = torch.cat([x, input[:,-coordinate_size:]], 1)
                # slice this layer's matrix and bias out of the latent code
                matrix_weights = latents[:, latent_offset : latent_offset + (input_dim * output_dim)] # samples x (input_dim*output_dim)
                latent_offset += input_dim * output_dim
                bias_weights = latents[:, latent_offset : latent_offset + output_dim] # samples x output_dim
                latent_offset += output_dim
                matrix_weights = matrix_weights.reshape(-1, output_dim, input_dim) # samples x output_dim x input_dim
                bias_weights = bias_weights.reshape(-1, output_dim, 1) # samples x output_dim x 1
                # x: samples x input_dim
                x = torch.matmul(matrix_weights, x.view(-1, input_dim, 1)) + bias_weights # samples x output_dim x 1
                x = x.squeeze(-1)
                if layer < self.num_layers - 2:
                    x = self.relu(x)
        else:
            # standard DeepSDF-style MLP with optional latent/xyz re-injection
            for layer in range(0, self.num_layers - 1):
                lin = getattr(self, "patch_lin" + str(layer))
                if layer in self.latent_in:
                    x = torch.cat([x, input], 1)
                elif layer != 0 and self.xyz_in_all:
                    x = torch.cat([x, xyz], 1)
                x = lin(x)
                if layer < self.num_layers - 2:
                    if (
                        self.norm_layers is not None
                        and layer in self.norm_layers
                        and not self.weight_norm
                    ):
                        bn = getattr(self, "patch_bn" + str(layer))
                        x = bn(x)
                    #x = self.softplus(x)
                    x = self.relu(x)
                    #x = self.elu(x)
                    if self.dropout is not None and layer in self.dropout:
                        x = F.dropout(x, p=self.dropout_prob, training=self.training)
    # squash the SDF output to [-1, 1]; skipped in ODE mode where the output
    # is a coordinate, not a network activation
    if hasattr(self, "th") and not self.use_ode:
        x = self.th(x)
    if ode and ode_return_pos:
        return x, ode_pos
    else:
        return x
# input: N x (L+3)
def forward(self, input):
if type(input) != dict:
#print("not giving a dict as input is dangerous, as some of the losses assume proper scene information to work as intended")
standard_input = input
input = {}
input["num_samp_per_scene"] = 1
input["standard_input"] = standard_input # samples x (mixture_latent_size + 3)
if self.mixture_latent_mode == "patch_explicit_meta_implicit":
input["mixture_latent_vectors"] = standard_input[0,:-3].reshape(1, -1)
input["xyz"] = standard_input[:, -3:].reshape(1, -1, 3)
input["num_samp_per_scene"] = input["xyz"].shape[1]
else:
input["mixture_latent_vectors"] = standard_input[:, :-3].reshape(-1, input["num_samp_per_scene"], self.mixture_latent_size)
input["xyz"] = standard_input[:, -3:].reshape(-1, input["num_samp_per_scene"], 3)
input["use_patch_encoder"] = False
input["mixture_latent_mode"] = self.mixture_latent_mode
extra_loss = {}
mixture_latent_mode = input["mixture_latent_mode"]
device = input["xyz"].get_device()
if self.use_depth_encoder:
depth_maps = input["depth_maps"]
depth_maps = depth_maps.view(-1, 1, 400, 400).repeat(1,3,1,1) # treat as RGB channels
encoder_output = self.depth_encoder(depth_maps) # num_scenes x mixture_latent_size
input["mixture_latent_vectors"] = encoder_output
if self.num_patches > 1:
mixture_latent_vectors = input["mixture_latent_vectors"] # num_scenes x mixture_latent_size
xyz = input["xyz"] # num_scenes x samp_per_scene x 3
patch_metadata_input = mixture_latent_vectors
# transform input latent vectors to patch metadata
if mixture_latent_mode == "all_explicit":
patch_metadata = patch_metadata_input # num_scenes x (num_patches * (patch_latent_size + 3 + 3 + 1)) # patch latent, position, rotation, scaling
elif mixture_latent_mode == "patch_explicit_meta_implicit":
patch_metadata = patch_metadata_input # num_scenes x (posrot_latent_size + num_patches * patch_latent_size) # patch latent, implicit (position, rotation, scaling)
_patch_latents = patch_metadata[:,self.posrot_latent_size:].view(-1, self.num_patches, self.patch_latent_size)
patch_metadata = patch_metadata[:,:self.posrot_latent_size] # num_scenes x posrot_latent_size
posrot_latents = patch_metadata.clone()
for i, (layer_type, _, activation, dropout) in enumerate(self.mixture_to_patch_parameters["layers"]):
if layer_type == "FC":
layer = getattr(self, "object_to_patch_FC_" + str(i))
patch_metadata = layer(patch_metadata)
if dropout > 0.:
dropout = getattr(self, "object_to_patch_dropout_" + str(i))
patch_metadata = dropout(patch_metadata)
if activation == "relu":
patch_metadata = self.relu(patch_metadata)
elif activation == "elu":
patch_metadata = self.elu(patch_metadata)
elif activation != "none":
raise RuntimeError("wrong activation:" + activation)
# patch_metadata: num_scenes x num_patches * (3 + 3 + 1)
# _patch_latents: num_scenes x num_patches x patch_latent_size
num_scenes = patch_metadata.shape[0]
patch_metadata = torch.cat([_patch_latents, patch_metadata.view(-1, self.num_patches, 3+3+1)], dim=-1).view(num_scenes, -1)
elif mixture_latent_mode == "all_implicit":
patch_metadata = patch_metadata_input # num_scenes x object_latent # implicit (patch latent, position, rotation, scaling)
for i, (layer_type, _, activation, dropout) in enumerate(self.mixture_to_patch_parameters["layers"]):
if layer_type == "FC":
layer = getattr(self, "object_to_patch_FC_" + str(i))
patch_metadata = layer(patch_metadata)
if dropout > 0.:
dropout = getattr(self, "object_to_patch_dropout_" + str(i))
patch_metadata = dropout(patch_metadata)
if activation == "relu":
patch_metadata = self.relu(patch_metadata)
elif activation == "elu":
patch_metadata = self.elu(patch_metadata)
elif activation != "none":
raise RuntimeError("wrong activation:" + activation)
patch_metadata = patch_metadata.reshape(-1, self.num_patches, self.patch_latent_size + 3 + 3 + (1 if self.variable_patch_scaling else 0)) # num_scenes x num_patches x (patch_latent_size + 3 + 3 + 1)
global_xyz = xyz.repeat(1, 1, self.num_patches).view(-1, input["num_samp_per_scene"], self.num_patches, 3) # num_scenes x samp_per_scene x num_patches x 3
patch_latent_vectors = patch_metadata[:, :, :self.patch_latent_size] # num_scenes x num_patches x patch_latent_size
patch_position = patch_metadata[:, :, self.patch_latent_size:self.patch_latent_size+3] # num_scenes x num_patches x 3
patch_rotation = patch_metadata[:, :, self.patch_latent_size+3:self.patch_latent_size+6] # num_scenes x num_patches x 3
if self.variable_patch_scaling:
patch_scaling = patch_metadata[:, :, self.patch_latent_size+6:self.patch_latent_size+7] # num_scenes x num_patches x 1. this is the scaling of the patch size, i.e. a value of 2 means that the patch's radius is twice as big
if self.minimum_scale > 0.:
#minimum_scaling = 0.01
patch_scaling = torch.clamp(patch_scaling, min=self.minimum_scale)
if self.maximum_scale != 1000.:
#maximum_scaling = 0.5
patch_scaling = torch.clamp(patch_scaling, max=self.maximum_scale)
else:
patch_scaling = self.non_variable_patch_radius * torch.ones((patch_position.shape[0], patch_position.shape[1], 1), device=device)
fix_metadata_regression_by_decreasing_patch_scale = True
if fix_metadata_regression_by_decreasing_patch_scale:
if "current_stage" in input and input["current_stage"] == "1":
patch_scaling /= 1.3
patch_xyz = global_xyz.clone()
patch_xyz -= patch_position.unsqueeze(1) # num_scenes x samp_per_scene x num_patches x 3
unscaled_center_distances_nonflat = torch.norm(patch_xyz, dim=-1) # num_scenes x samp_per_scene x num_patches
scaled_center_distances_nonflat = unscaled_center_distances_nonflat / patch_scaling.squeeze(-1).unsqueeze(1)
scaled_center_distances = scaled_center_distances_nonflat.flatten() # scaled distances to patch center
unscaled_center_distances = unscaled_center_distances_nonflat.flatten() # unscaled distances to patch center
patch_weight_type = "gaussian"
if patch_weight_type == "binary":
patch_weights = (scaled_center_distances < 1.).to(torch.float).detach() # num_scenes * samp_per_scene * num_patches
elif patch_weight_type == "gaussian":
std_cutoff = 3.
smooth_patch_seam_weights = True
patch_weight_std = 1. / std_cutoff
import numpy as np
distances_to_use = scaled_center_distances # if self.patch_scaling else unscaled_center_distances
patch_weights = torch.zeros_like(scaled_center_distances)
patch_mask = scaled_center_distances < 1.
patch_weights[patch_mask] = torch.exp( -0.5 * (scaled_center_distances[patch_mask]/patch_weight_std)**2 ) - (np.exp(-0.5 * std_cutoff**2) if smooth_patch_seam_weights else 0.) # samples * num_patches
patch_weights[~patch_mask] = 0.
else:
raise RuntimeError("missing patch_weight_type")
patch_weights = patch_weights.view(-1, self.num_patches) # samples x num_patches
patch_weight_normalization = torch.sum(patch_weights, 1) # samples
patch_weight_norm_mask = patch_weight_normalization == 0.
patch_weights[patch_weight_norm_mask,:] = 0.0
patch_weights[~patch_weight_norm_mask,:] = patch_weights[~patch_weight_norm_mask,:] / patch_weight_normalization[~patch_weight_norm_mask].unsqueeze(-1)
patch_weights = patch_weights.view(-1) # samples * num_patches
if self.use_rotations:
rotations = self._convert_euler_to_matrix(patch_rotation.reshape(-1, 3)).view(-1, self.num_patches, 3, 3) # num_scenes x num_patches x 3 x 3
# first argument: num_scenes x 1 x num_patches x 3 x 3
# second argument: num_scenes x samp_per_scene x num_patches x 3 x 1
patch_xyz = torch.matmul(torch.transpose(rotations, -2, -1).unsqueeze(1), patch_xyz.unsqueeze(-1)) # num_scenes x samp_per_scene x num_patches x 3 x 1
# patch_scaling: num_scenes x num_patches x 1
# patch_xyz: num_scenes x samp_per_scene x num_patches x 3 x 1
patch_xyz /= patch_scaling.unsqueeze(1).unsqueeze(-1)
if input["use_patch_encoder"]:
sdf_gt = input["sdf_gt"] # num_scenes x samp_per_scene x 1
per_patch_sdf_gt = sdf_gt / patch_scaling.unsqueeze(1).squeeze(-1) # num_scenes x samp_per_scene x num_patches
patch_encoder_input = torch.cat([patch_xyz.squeeze(-1), per_patch_sdf_gt.unsqueeze(-1)], dim=-1) # num_scenes x samp_per_scene x num_patches x 4
patch_encoder_input = patch_encoder_input.view(-1, 4) # samples * num_patches x 4
optimize_input_of_patch_encoder = True
if optimize_input_of_patch_encoder:
samples_in_patches_mask = patch_weights > self.weight_threshold
patch_encoder_input = patch_encoder_input[samples_in_patches_mask,:]
x = patch_encoder_input
for i, (layer_type, param, activation) in enumerate(self.patch_encoder["layers"]):
if layer_type == "FC":
lin = getattr(self, "patch_encoder_lin" + str(i))
x = lin(x)
if activation == "relu":
x = self.relu(x)
if layer_type == "max":
activation = param
features_size = x.shape[-1]
x = self.relu(x) # some_samples x features_size. this allows to use "0" as a dummy value in helper_tensor_for_max
num_scenes = sdf_gt.shape[0]
helper_tensor_for_max = torch.zeros((num_scenes, input["num_samp_per_scene"], self.num_patches, features_size), device=device)
original_shape_helper_tensor = helper_tensor_for_max.shape
helper_tensor_for_max = helper_tensor_for_max.view(-1, features_size)
helper_tensor_for_max[samples_in_patches_mask,:] = x
helper_tensor_for_max = helper_tensor_for_max.view(original_shape_helper_tensor)
x, _ = torch.max(helper_tensor_for_max, dim=1) # num_scenes x num_patches x features_size
x = x.view(-1, features_size) # num_scenes * num_patches x features_size
if activation == "relu": # redundant with previous relu
x = self.relu(x)
patch_latent_vectors = x.view(-1, self.num_patches, self.patch_latent_size) # num_scenes x num_patches x patch_latent_size
patch_xyz = patch_xyz.squeeze(-1)
repeated_patch_latent_vectors = patch_latent_vectors.repeat(1, input["num_samp_per_scene"], 1).view(-1, self.patch_latent_size) # num_scenes * samp_per_scene * num_patches x patch_latent_size
input["standard_input"] = torch.cat([repeated_patch_latent_vectors, patch_xyz.view(-1, 3)], 1) # num_scenes * samp_per_scene * num_patches x (patch_latent_size + 3)
optimize_input_of_patch_network = True
if optimize_input_of_patch_network:
samples_in_patches_mask = patch_weights > self.weight_threshold
input["standard_input"] = input["standard_input"][samples_in_patches_mask,:]
else: # single patch
mixture_latent_vectors = input["mixture_latent_vectors"] # num_scenes x mixture_latent_size
if mixture_latent_mode == "all_explicit":
patch_latent_vectors = mixture_latent_vectors
elif mixture_latent_mode == "all_implicit":
intermediate = mixture_latent_vectors # num_scenes x mixture_size # implicit (patch latent, position, rotation, scaling)
for i, (layer_type, _, activation, dropout) in enumerate(self.mixture_to_patch_parameters["layers"]):
if layer_type == "FC":
layer = getattr(self, "object_to_patch_FC_" + str(i))
intermediate = layer(intermediate)
if dropout > 0.:
dropout = getattr(self, "object_to_patch_dropout_" + str(i))
intermediate = dropout(intermediate)
if activation == "relu":
intermediate = self.relu(intermediate)
elif activation == "elu":
intermediate = self.elu(intermediate)
patch_latent_vectors = intermediate # num_scenes x patch_size
# input["mixture_latent_vectors"]: num_scenes x mixture_latent_size
repeated_patch_latent_vectors = patch_latent_vectors.view(-1, 1, self.patch_latent_size).repeat(1, input["num_samp_per_scene"], 1).view(-1, self.patch_latent_size) # num_scenes * samp_per_scene * num_patches x patch_latent_size
# input["xyz"]: num_scenes x samp_per_scene x 3
input["standard_input"] = torch.cat([repeated_patch_latent_vectors, input["xyz"].view(-1, 3)], 1) # num_scenes * samp_per_scene * num_patches x (patch_latent_size + 3)
patch_network_input = input["standard_input"] # samples x (patch_latent_size + 3) (might be fewer samples if input is optimized)
skip_patch_net = patch_network_input.shape[0] == 0 or ("current_stage" in input and input["current_stage"] == "1" and self.num_patches > 1)
if not skip_patch_net:
x = self.patch_network_forward(patch_network_input, extra_loss=extra_loss) # samples x 1
if self.num_patches > 1:
if skip_patch_net:
patch_sdfs = 0.
else:
# undo subampled/optimized network input & undo scaling
if optimize_input_of_patch_network:
all_samples = torch.zeros((samples_in_patches_mask.shape[0], x.shape[1]), device=device) # samples x 1
all_samples[samples_in_patches_mask,:] = x
all_samples_original_shape = all_samples.shape
all_samples = all_samples.view(patch_scaling.shape[0], input["num_samp_per_scene"], self.num_patches)
all_samples *= patch_scaling.unsqueeze(1).squeeze(-1)
all_samples = all_samples.view(all_samples_original_shape)
all_samples[~samples_in_patches_mask,:] = 1.
x = all_samples
else:
raise RuntimeError("check code again")
x_original_shape = x.shape
x = x.view(patch_scaling.shape[0], input["num_samp_per_scene"], self.num_patches)
x *= patch_scaling.unsqueeze(1).squeeze(-1)
x = x.view(x_original_shape)
patch_sdfs = x.view(-1, self.num_patches)
patch_weights = patch_weights.view(-1, self.num_patches)
if self.loss_on_patches_instead_of_mixture or self.script_mode:
if "sdf_gt" in input:
if mixture_latent_mode == "all_explicit":
direct_patch_weight = 1.
else:
direct_patch_weight = 10.
if self.use_curriculum_weighting:
curriculum_deepsdf_lambda = 0.5
curriculum_clamping_distance = 0.1
clamped_patch_sdfs = torch.clamp(patch_sdfs.detach(), -curriculum_clamping_distance, +curriculum_clamping_distance)
clamped_sdf_gt = torch.clamp(input["sdf_gt"].view(-1, 1).detach(), -curriculum_clamping_distance, +curriculum_clamping_distance)
first_weight = torch.sign(clamped_sdf_gt) # samples x 1
second_weight = torch.sign(clamped_sdf_gt - clamped_patch_sdfs) # samples x num_patches
curriculum_weights = 1. + curriculum_deepsdf_lambda * first_weight * second_weight # samples x num_patches
patch_mask = patch_weights == 0.
patch_recon = patch_sdfs - input["sdf_gt"].view(-1, 1) # broadcast across patches
patch_recon[patch_mask] = 0.
if self.use_curriculum_weighting:
patch_recon *= curriculum_weights
patch_recon = patch_recon.view(-1, input["num_samp_per_scene"], self.num_patches)
patch_recon = torch.abs(patch_recon)
patch_recon = patch_recon / (torch.sum((~patch_mask).view(-1, input["num_samp_per_scene"], self.num_patches), dim=1).unsqueeze(1).float() + 0.000001)
direct_patch_loss = torch.sum(patch_recon, dim=1) # num_scenes x num_patches
extra_loss["direct_patch"] = direct_patch_weight * torch.mean(direct_patch_loss)
else:
direct_patch_loss = None # dummy value
mixture_type = "convex"
if mixture_type == "convex":
weighted_sdfs = patch_weights * patch_sdfs
weighted_sdfs = torch.sum(weighted_sdfs, 1)
normalization = torch.sum(patch_weights, 1)
# hacky
default_sdf_value = 1.0
mask = normalization == 0.
weighted_sdfs[mask] = default_sdf_value
weighted_sdfs[~mask] = weighted_sdfs[~mask] / normalization[~mask]
weighted_sdfs = weighted_sdfs.unsqueeze(1)
x = weighted_sdfs
##elif mixture_type == "closest":
# # only works if patch_weights are monotonically falling with distance
# patch_assignment = torch.argmax(patch_weights, dim=1)
# x = patch_sdfs[:, patch_assignment]
if self.do_code_regularization:
if mixture_latent_mode == "all_explicit" or mixture_latent_mode == "patch_explicit_meta_implicit":
extra_loss["latent_regularization_patch"] = torch.mean(patch_latent_vectors.pow(2))
if mixture_latent_mode == "all_implicit":
extra_loss["latent_regularization_object"] = torch.mean(mixture_latent_vectors.pow(2))
elif mixture_latent_mode == "patch_explicit_meta_implicit":
extra_loss["latent_regularization_posrot"] = torch.mean(posrot_latents.pow(2))
if self.keep_scales_small:
if fix_metadata_regression_by_decreasing_patch_scale:
if "current_stage" in input and input["current_stage"] == "1":
small_scales_weight = 20.
else:
if mixture_latent_mode == "all_implicit":
small_scales_weight = 0.2
elif mixture_latent_mode == "all_explicit":
small_scales_weight = 0.01
else:
raise RuntimeError()
else:
if mixture_latent_mode == "all_implicit":
small_scales_weight = 0.2
elif mixture_latent_mode == "all_explicit":
small_scales_weight = 0.01
else:
raise RuntimeError()
extra_loss["small_scales"] = small_scales_weight * torch.mean(patch_scaling**2)
if self.scales_low_variance:
variances = torch.var(patch_scaling.view(-1, self.num_patches), dim=-1, unbiased=False)
if mixture_latent_mode == "all_implicit":
low_variance_weight = 50.
elif mixture_latent_mode == "all_explicit":
low_variance_weight = 0.01
else:
raise RuntimeError()
extra_loss["low_variance_scales"] = low_variance_weight * torch.mean(variances)
if self.pull_free_space_patches_to_surface or self.pull_patches_to_uncovered_surface:
surface_sdf_threshold = 0.02
if "sdf_gt" in input:
sdf_gt = input["sdf_gt"].squeeze(-1).flatten()
surface_mask = torch.abs(sdf_gt) <= surface_sdf_threshold
if "extra_losses_mask" in input and input["extra_losses_mask"] is not None: # used for hierarchical representation
surface_mask = surface_mask[input["extra_losses_mask"]]
if self.pull_free_space_patches_to_surface:
free_space_distance_threshold = 0.2 * self.non_variable_patch_radius
free_space_loss_weight = 5.0
if "sdf_gt" in input and "num_samp_per_scene" in input:
masked_distances = unscaled_center_distances.clone().view(-1, self.num_patches) #distances: samples * num_patches
masked_distances[~surface_mask,:] = 10000000.
masked_distances = masked_distances.view(-1, input["num_samp_per_scene"], self.num_patches) # num_scenes x samples_per_scene x num_patches
closest_surface_distances, closest_surface_indices = torch.min(masked_distances, dim=1) # num_scenes x num_patches
free_space_patches = closest_surface_distances > free_space_distance_threshold
closest_surface_distances[~free_space_patches] = 0.
free_space_scene_normalization = torch.sum(free_space_patches, dim=1) # num_scenes
free_space_scenes = free_space_scene_normalization > 0
eps = 0.001
free_space_scene_losses = torch.sum(closest_surface_distances[free_space_scenes,:], dim=1) / (free_space_scene_normalization[free_space_scenes].float() + eps) # num_scenes
free_space_loss = torch.sum(free_space_scene_losses) / (torch.sum(free_space_scenes) + eps)
extra_loss["free_space"] = free_space_loss_weight * free_space_loss
if self.align_patch_rotation_with_normal and "normals" in input:
# surface normal should be aligned with local z-axis of patch coordinate system
if not self.pull_free_space_patches_to_surface:
masked_distances = unscaled_center_distances.clone().view(-1, self.num_patches) #distances: samples * num_patches
masked_distances[~surface_mask,:] = 10000000.
masked_distances = masked_distances.view(-1, input["num_samp_per_scene"], self.num_patches) # num_scenes x samples_per_scene x num_patches
closest_surface_distances, closest_surface_indices = torch.min(masked_distances, dim=1) # num_scenes x num_patches
normals = input["normals"] # num_scenes x samp_per_scene x 3
index_helper = np.repeat(np.arange(normals.shape[0]), repeats=self.num_patches) # e.g. for three patches and four scenes: [0,0,0,1,1,1,2,2,2,3,3,3]
#closest_surface_indices # shape: num_scenes x num_patches. indexes samples_per_scene
target_zaxis = normals[index_helper, closest_surface_indices.view(-1), :] # num_scenes * num_patches x 3
target_zaxis = target_zaxis.view(-1, self.num_patches, 3) # num_scenes x num_patches x 3
#rotations # num_scenes x num_patches x 3 x 3. local-to-global coordinates
regressed_zaxis = rotations[:,:,:,2] # num_scenes x num_patches x 3
dot_product = torch.sum(regressed_zaxis * target_zaxis, dim=-1) # num_scenes x num_patches
rotation_loss = (1. - dot_product)**2
rotation_loss_weight = 1.
extra_loss["rotation_alignment"] = rotation_loss_weight * torch.mean(rotation_loss)
if self.pull_patches_to_uncovered_surface:
pull_weight_threshold = 0. # if 0. --> effectively normalization == 0
pull_std = 0.05
loss_weight = 200.0
if "sdf_gt" in input and "num_samp_per_scene" in input:
sum_weights = normalization
weight_mask = sum_weights.detach() <= pull_weight_threshold
pull_mask = weight_mask * surface_mask # logical AND. shape: samples
pull_mask = pull_mask.unsqueeze(1).repeat(1, self.num_patches).view(-1) # shape: samples * num_patches
use_scaled_distances = False
if use_scaled_distances:
distances_to_use = scaled_center_distances # num_scenes x samp_per_scene x num_patches
else:
# unscaled_center_distances_nonflat: num_scenes x samp_per_scene x num_patches
distances_to_use = unscaled_center_distances_nonflat - patch_scaling.unsqueeze(1).squeeze(-1)# * self.patch_clamping_radius
distances_to_use = torch.clamp(distances_to_use, min=0.) # remove negative entries, i.e. samples inside patches
distances_to_use = distances_to_use.flatten()
eps = 0.0001
normalize_weights = True
if normalize_weights:
pull_distances = torch.zeros_like(distances_to_use) # distances: samples * num_patches
pull_distances[pull_mask] = distances_to_use[pull_mask]
pull_weights = torch.exp( -0.5 * (pull_distances/pull_std)**2 ) / pull_std # samples * num_patches
pull_weights[~pull_mask] = 0.
pull_weights = pull_weights.view(-1, self.num_patches)
pull_normalization = torch.sum(pull_weights, 1) # samples
norm_mask = pull_normalization == 0.
pull_weights[norm_mask,:] = 0.0
pull_weights[~norm_mask,:] = pull_weights[~norm_mask,:] / (pull_normalization[~norm_mask].unsqueeze(-1) + eps)
pull_weights = pull_weights.view(-1)
else:
pull_distances = distances_to_use[pull_mask] # distances: samples * num_patches
pull_weights = torch.exp( -0.5 * (pull_distances/pull_std)**2 ) / pull_std
weighted_pulls = (pull_weights * pull_distances).view(-1, input["num_samp_per_scene"], self.num_patches)
weighted_pulls = torch.sum(weighted_pulls, dim=-1) # num_scenes x samples_per_scene
norm_mask = norm_mask.view(-1, input["num_samp_per_scene"])
norm_mask = torch.sum(norm_mask, dim=1) # num_scenes
norm_scenes_mask = norm_mask > 0
coverage_scene_losses = torch.sum(weighted_pulls[norm_scenes_mask,:], dim=1) / (norm_mask[norm_scenes_mask].float() + eps) # num_scenes
coverage_loss = torch.sum(coverage_scene_losses) / (torch.sum(norm_scenes_mask) + eps)
extra_loss["uncovered"] = loss_weight * coverage_loss
else: # single patch
if self.do_code_regularization:
extra_loss["latent_regularization_object"] = torch.mean(input["mixture_latent_vectors"].pow(2))
if "first_stage_pos" in input and input["first_stage_pos"] is not None:
target_positions = input["first_stage_pos"] # num_scenes x num_patches x 3
target_rotations = input["first_stage_rot"] # num_scenes x num_patches x 3
target_scales = input["first_stage_scale"] # num_scenes x num_patches x 1
target_rotations = self._convert_euler_to_matrix(target_rotations.view(-1, 3)).view(-1, self.num_patches, 3, 3)
target_zaxis = target_rotations[:,:,:,2]
regressed_zaxis = rotations[:,:,:,2]
dot_product = torch.sum(regressed_zaxis * target_zaxis, dim=-1) # num_scenes x num_patches
rotation_loss_weight = 0.
rotation_loss = (1. - dot_product)**2
scale_weight = 30.
scale_loss = (target_scales - patch_scaling)**2
position_weight = 3.
position_loss = (target_positions - patch_position)**2
extra_loss["metadata_rotation"] = rotation_loss_weight * torch.mean(rotation_loss)
extra_loss["metadata_position"] = position_weight * torch.mean(position_loss)
extra_loss["metadata_scale"] = scale_weight * torch.mean(scale_loss)
if self.script_mode and self.num_patches > 1:
self.patch_network_input = patch_network_input
self.patch_latent_vectors = patch_latent_vectors
self.patch_positions = patch_position
self.patch_rotations = patch_rotation
self.patch_scalings = patch_scaling
self.patch_network_sdfs = patch_sdfs
self.patch_network_mixture_weights = patch_weights
self.patch_network_mixture_normalization = normalization
self.direct_patch_loss = direct_patch_loss
if self.num_patches > 1:
extra_outputs = {}
extra_outputs["patch_positions"] = patch_position
extra_outputs["patch_rotations"] = patch_rotation
extra_outputs["patch_scalings"] = patch_scaling
# always return SDF/x as the first result, else adapt deep_sdf.utils.decode_sdf and potentially other code!
if input["use_patch_encoder"]:
regressed_mixture_vectors = torch.cat([patch_latent_vectors, patch_position, patch_rotation, patch_scaling], dim=2)
regressed_mixture_vectors = regressed_mixture_vectors.view(-1, 1, self.num_patches * (self.patch_latent_size + 3 + 3 + 1))
return x, regressed_mixture_vectors, extra_loss
elif self.use_depth_encoder:
return x, extra_loss, extra_outputs, mixture_latent_vectors
else:
if "return_extra_outputs" in input and input["return_extra_outputs"]:
return x, extra_loss, extra_outputs
else:
return x, extra_loss
| {
"alphanum_fraction": 0.6092788438,
"author": null,
"avg_line_length": 52.8891797557,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "65108ba5efc0183e50b0f8f338d9fbd903b3a97f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-01-22T08:33:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-29T07:07:10.000Z",
"max_forks_repo_head_hexsha": "1d20bfd1ec74f8c6687939c6092642685fdeee94",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "edgar-tr/patchnets",
"max_forks_repo_path": "code/networks/deep_sdf_decoder.py",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "1d20bfd1ec74f8c6687939c6092642685fdeee94",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T13:25:28.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-05-17T10:05:53.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "edgar-tr/patchnets",
"max_issues_repo_path": "code/networks/deep_sdf_decoder.py",
"max_line_length": 286,
"max_stars_count": 23,
"max_stars_repo_head_hexsha": "1d20bfd1ec74f8c6687939c6092642685fdeee94",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "edgar-tr/patchnets",
"max_stars_repo_path": "code/networks/deep_sdf_decoder.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-31T09:18:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-05T15:38:45.000Z",
"num_tokens": 13096,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 60611
} |
import numpy as np
import pandas as pd
from typing import List
import functools
from tqdm import tqdm
class Equation():
    """
    An Equation is a wrapper for a generic function used in integration.

    The parameter names and the output variable are read from the wrapped
    function's annotations unless given explicitly.

    Args:
        function: Callable to wrap; its keyword arguments are its parameters.
        parameters: Explicit list of parameter names; when omitted (or empty)
            they are inferred from the function's annotations.
        output_variable: Name of the variable the result is written to; when
            omitted it is taken from the function's 'return' annotation.
        string: Human-readable description of the equation.

    Raises:
        ValueError: If no output variable can be determined.
    """
    def __init__(self, function, parameters : List[str] = None, output_variable : str = None, string : str = ''):
        self._function = function
        # set parameters
        # NOTE: the default used to be the mutable literal [] (shared across
        # calls); a None sentinel with a falsy check preserves the old
        # behaviour for both None and an explicitly passed empty list.
        if not parameters:
            self.parameters = [i for i in self._function.__annotations__.keys() if i != 'return']
        else:
            self.parameters = parameters
        # set output variable (identity comparison with None, per PEP 8)
        if output_variable is not None:
            self.output = output_variable
        else:
            self.output = self._function.__annotations__.get('return', None)
        if self.output is None:
            raise ValueError('Set output using function annotation or with output_variable kwarg!')
        # set description, e.g. "func := M x (R^-2) x dt"
        self._string = f'{function.__name__} := {string}'

    def __call__(self, input_dict):
        """Evaluate the wrapped function, picking only the needed keys
        from `input_dict` (extra keys are ignored)."""
        parameters = {}
        for key in self.parameters:
            parameters[key] = input_dict[key]
        return self._function(**parameters)

    @property
    def string(self):
        """Human-readable description of the equation."""
        return self._string

    def __repr__(self):
        return self.string
class History():
    """
    Records the state of a Simulation instance at every timestep, together
    with its metadata, for analysis afterwards.
    """
    def __init__(self, sim_obj, **kwargs):
        self._simulation = sim_obj
        self.data = {}
        self.initialise()

    def initialise(self):
        """Reset the record, seeding each variable with its current value."""
        self.data = {name: [val] for name, val in self._simulation.variables.items()}

    def update(self):
        """Append the current value of every simulation variable."""
        for name, val in self._simulation.variables.items():
            self.data[name].append(val)
        return

    @property
    def dataframe(self) -> pd.DataFrame:
        """The recorded data as a DataFrame with a derived time column 't'."""
        frame = pd.DataFrame(self.data)
        dt = self.meta['constants']['dt']
        # one row per recorded state: the initial one plus one per timestep
        frame['t'] = [step * dt for step in range(self._simulation.timestep + 1)]
        return frame

    @property
    def meta(self) -> dict:
        """Metadata: the simulation constants and equation descriptions."""
        return {
            'constants' : self._simulation.constants,
            'equations' : [eq.string for eq in self._simulation.equations]
        }
class Simulation():
    """
    A simulation integrates over a given timestep to solve Equations in order
    as a function of time.

    Args:
        dt: Integration timestep; stored in the constants under key 'dt'.
        constants: Mapping of constant names to values.
        variables: Mapping of variable names to initial values; updated in
            place by every integration step.
        equations: Equations solved in order at each timestep.
        **kwargs: Forwarded to the History instance recording this simulation.
    """
    def __init__(self, dt, constants=None, variables=None, equations : List[Equation] = None, **kwargs):
        # NOTE: the defaults used to be the mutable literals {} / [], which
        # are created once per 'def' and shared by every instance -- so 'dt'
        # and equations added via add_equation leaked between simulations.
        # None sentinels fix that while still keeping caller-passed
        # containers by reference, exactly as before.
        self._constants = constants if constants is not None else {}
        self._constants['dt'] = dt
        self._variables = variables if variables is not None else {}
        self._equations = equations if equations is not None else []
        self.timestep = 0
        self.history = History(self, **kwargs)

    @property
    def string(self) -> str:
        """Printable string summarising model"""
        # TODO: summary not implemented yet -- intentionally returns ''.
        output = ""
        return output

    def integrate(self):
        """Advance one timestep: evaluate each equation in order, write its
        result to the variable named by equation.output, record the state."""
        for equation in self.equations:
            self._variables[equation.output] = equation(self.inputs)
        self.timestep += 1
        self.history.update()
        return

    def add_equation(self, equation : Equation):
        """Append an equation and reset the history (recorded columns may change)."""
        self._equations.append(equation)
        self.history.initialise()

    @property
    def inputs(self):
        """Merged constants and variables fed to each Equation
        (variables win on name clashes)."""
        return {**self.constants, **self.variables}

    @property
    def constants(self):
        return self._constants

    @property
    def variables(self):
        return self._variables

    @property
    def equations(self):
        return self._equations

    def run(self, n_timesteps):
        """Integrate for `n_timesteps` steps with a progress bar."""
        for _ in tqdm(range(n_timesteps)):
            self.integrate()
        return
if __name__ == '__main__':
    # Smoke test: exercises Equation construction/calling and Simulation runs.
    print('Running Test...')
    dictionary = {'t' : 10}

    print('Testing functions with dict vs kwargs:')
    def test_fn_dict(input_dict):
        # reads its input from a plain dict with a fallback default
        value = input_dict.get('t', 1)
        return f'\tUsing test_fn_dict: {value}'
    print(test_fn_dict(dictionary))

    def test_fn_kwargs(t=1):
        # same behaviour, but the dict is unpacked into keyword arguments
        value = t
        return f'\tUsing test_fn_kwargs: {value}'
    print(test_fn_kwargs(**dictionary))

    print('Setting up Equation:')
    def func(M : float = 1.0, R : float = 1.0, dt : float = 1.0) -> 'R':
        """M x (R^-2) x dt"""
        print(f'\tfrom func: M={M}, R={R}, dt={dt}')
        if R == 0:
            # avoid a ZeroDivisionError once the radius has shrunk to zero
            return 0.0
        return R - (1./(M * R ** 2)) * dt
    equation = Equation(func, string=func.__doc__)
    # extra keys in the input dict (here 'dummy') are ignored by Equation
    result = equation({
        'M' : 1,
        'R' : 2,
        'dummy' : 3,
        'dt' : 1
    })
    print(f'\tResult from equation: {result}')
    print(f'\tString from equation: {equation.string}')

    print('Setting up simulation:')
    simu = Simulation(
        0.1,
        variables= {
            'M' : 5,
            'R' : 10
        },
        equations = [equation]
    )
    print(f'\tsimu.variables: {simu.variables}')
    print(f'\tsimu.constants: {simu.constants}')
    print(f'\tsimu.inputs: {simu.inputs}')
    print(f'\tsimu.equations: {simu.equations}')
    print(f'\tsimu.history.meta: {simu.history.meta}')
    print(f'\tsimu.history.dataframe:\n{simu.history.dataframe}')

    print('Doing a single step:')
    simu.integrate()
    print(f'\tsimu.history.dataframe:\n{simu.history.dataframe}')

    print('Doing multiple steps:')
    simu.run(100)
    print(f'\tsimu.history.dataframe:\n{simu.history.dataframe}')

    # f-prefix removed from the placeholder-free string below (lint fix)
    print('Simulation with 2 equations:')
    def func_2(R : float = 1) -> 'M':
        """M=R/2"""
        return R/2
    equation_2 = Equation(func_2, string=func_2.__doc__)
    # fixed a duplicated assignment here ("simu = simu = Simulation(...)")
    simu = Simulation(
        0.1,
        variables= {
            'M' : 5,
            'R' : 10
        },
        equations = [equation, equation_2]
    )
    simu.run(100)
    print(f'\tsimu.history.dataframe:\n{simu.history.dataframe}')
| {
"alphanum_fraction": 0.5848960109,
"author": null,
"avg_line_length": 27.5399061033,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3550c796137cdc3ddd3aff812d157456ffc5b485",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-23T17:15:23.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-23T17:15:23.000Z",
"max_forks_repo_head_hexsha": "bc5b2e00a04d11319c85e749f9c056b75b450ff7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "debeshmandal/brownian",
"max_forks_repo_path": "hydrogels/theory/models/integrator/engine.py",
"max_issues_count": 24,
"max_issues_repo_head_hexsha": "bc5b2e00a04d11319c85e749f9c056b75b450ff7",
"max_issues_repo_issues_event_max_datetime": "2021-12-31T18:46:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-06-04T13:48:57.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "debeshmandal/brownian",
"max_issues_repo_path": "hydrogels/theory/models/integrator/engine.py",
"max_line_length": 111,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "bc5b2e00a04d11319c85e749f9c056b75b450ff7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "debeshmandal/brownian",
"max_stars_repo_path": "hydrogels/theory/models/integrator/engine.py",
"max_stars_repo_stars_event_max_datetime": "2021-02-12T13:37:23.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-13T01:07:30.000Z",
"num_tokens": 1438,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5866
} |
# Ahorn (Celeste map editor) plugin declaring the Spring Collab 2020
# "Change Theme" trigger so it can be placed and serialised in maps.
module SpringCollab2020ChangeThemeTrigger
using ..Ahorn, Maple
# Declare the trigger type and its map attributes via Maple's @mapdef macro.
# NOTE(review): 'enable' presumably toggles the theme change on/off -- confirm
# against the C# entity implementation.
@mapdef Trigger "SpringCollab2020/ChangeThemeTrigger" ChangeThemeTrigger(x::Integer, y::Integer,
width::Integer=Maple.defaultTriggerWidth, height::Integer=Maple.defaultTriggerHeight, enable::Bool=true)
# Palette entry shown in Ahorn; the trigger is placed as a resizable rectangle.
const placements = Ahorn.PlacementDict(
"Change Theme Trigger (Spring Collab 2020)" => Ahorn.EntityPlacement(
ChangeThemeTrigger,
"rectangle",
),
)
end
| {
"alphanum_fraction": 0.7349137931,
"author": null,
"avg_line_length": 29,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "e91f4b300741a24f26d5633c9168395ce3f662e4",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 12,
"max_forks_repo_forks_event_max_datetime": "2021-02-21T22:12:42.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-01-07T18:50:42.000Z",
"max_forks_repo_head_hexsha": "93ba2c1b0654c74b57cb48c46577c2b0ca322aa2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "EverestAPI/SpringCollab2020",
"max_forks_repo_path": "Ahorn/triggers/changeThemeTrigger.jl",
"max_issues_count": 117,
"max_issues_repo_head_hexsha": "93ba2c1b0654c74b57cb48c46577c2b0ca322aa2",
"max_issues_repo_issues_event_max_datetime": "2020-05-22T20:59:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-01-07T19:04:02.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "EverestAPI/SpringCollab2020",
"max_issues_repo_path": "Ahorn/triggers/changeThemeTrigger.jl",
"max_line_length": 109,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "93ba2c1b0654c74b57cb48c46577c2b0ca322aa2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "EverestAPI/SpringCollab2020",
"max_stars_repo_path": "Ahorn/triggers/changeThemeTrigger.jl",
"max_stars_repo_stars_event_max_datetime": "2020-02-03T09:53:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-01-07T22:02:34.000Z",
"num_tokens": 111,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 464
} |
// library headers
#include <linuxdeploy/core/log.h>
#include <boost/filesystem.hpp>
#include <util.h>
// local headers
#include "WebEnginePluginsDeployer.h"
using namespace linuxdeploy::plugin::qt;
using namespace linuxdeploy::core::log;
namespace bf = boost::filesystem;
bool WebEnginePluginsDeployer::deploy() {
// calling the default code is optional, but it won't hurt for now
if (!BasicPluginsDeployer::deploy())
return false;
ldLog() << "Deploying web engine plugins" << std::endl;
const auto newLibexecPath = appDir.path() / "usr/libexec/";
// make sure directory is there before trying to write a qt.conf file
bf::create_directories(newLibexecPath);
for (bf::directory_iterator i(qtLibexecsPath); i != bf::directory_iterator(); ++i) {
auto &entry = *i;
const std::string prefix = "QtWeb";
auto fileName = entry.path().filename();
// skip files which don't start with prefix
if (!strStartsWith(fileName.string(), prefix))
continue;
if (!appDir.deployExecutable(*i, newLibexecPath))
return false;
}
for (const auto &fileName : {"qtwebengine_resources.pak",
"qtwebengine_devtools_resources.pak",
"qtwebengine_resources_100p.pak",
"qtwebengine_resources_200p.pak", "icudtl.dat"}) {
auto path = qtDataPath / "resources" / fileName;
if (bf::is_regular_file(path))
appDir.deployFile(path, appDir.path() / "usr/resources/");
}
if (bf::is_directory(qtTranslationsPath / "qtwebengine_locales")) {
for (bf::directory_iterator i(qtTranslationsPath / "qtwebengine_locales"); i != bf::directory_iterator(); ++i) {
appDir.deployFile(*i, appDir.path() / "usr/translations/qtwebengine_locales/");
}
}
const auto qtConfPath = newLibexecPath / "qt.conf";
std::ofstream ofs(qtConfPath.string());
if (!ofs) {
ldLog() << LD_ERROR << "Failed to open" << qtConfPath << "for writing" << std::endl;
return false;
}
ofs << "# generated by linuxdeploy" << std::endl
<< "[Paths]" << std::endl
<< "Prefix = ../" << std::endl;
return true;
}
| {
"alphanum_fraction": 0.6136962248,
"author": null,
"avg_line_length": 32.0845070423,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "a3f414788cfe0de9c62f60d4dc45174fcea3bf71",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 16,
"max_forks_repo_forks_event_max_datetime": "2022-01-11T11:05:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-06-10T15:07:06.000Z",
"max_forks_repo_head_hexsha": "7dcddc5e6ef458ce2153b3fdeab4bc688ba36ea9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "walterbrebels/linuxdeploy-plugin-qt",
"max_forks_repo_path": "src/deployers/WebEnginePluginsDeployer.cpp",
"max_issues_count": 91,
"max_issues_repo_head_hexsha": "7dcddc5e6ef458ce2153b3fdeab4bc688ba36ea9",
"max_issues_repo_issues_event_max_datetime": "2022-03-24T22:06:42.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-07-18T05:19:27.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "walterbrebels/linuxdeploy-plugin-qt",
"max_issues_repo_path": "src/deployers/WebEnginePluginsDeployer.cpp",
"max_line_length": 120,
"max_stars_count": 45,
"max_stars_repo_head_hexsha": "7dcddc5e6ef458ce2153b3fdeab4bc688ba36ea9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "walterbrebels/linuxdeploy-plugin-qt",
"max_stars_repo_path": "src/deployers/WebEnginePluginsDeployer.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-03-27T09:33:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-10-10T08:35:22.000Z",
"num_tokens": 550,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2278
} |
% -*- root: 00-main.tex -*-
\section{Discussion}
\label{sec:regseg-discussion}
We present \regseg{}, a simultaneous segmentation and registration method that
maps a set of nested surfaces into a multivariate target-image.
The nonlinear registration process evolves driven by the fitness of the
piecewise-smooth classification of voxels in the target volume imposed
by the current mapping of the surfaces.
We propose \regseg{} to map anatomical information extracted from \gls*{t1}
images into the corresponding \gls*{dmri} of the same subject.
Previously, joint segmentation and registration has been applied successfully to
other problems such as longitudinal object tracking \citep{paragios_level_2003}
and atlas-based segmentation \citep{gorthi_active_2011}.
The most common approach involves optimizing a deformation model (registration)
that supports the evolution of the active contours (segmentation),
like \cite{paragios_level_2003,yezzi_variational_2003}.
% Conversely, in structure-informed segmentation, the sources of variability are the geometrical distortions
% after imaging and the anatomical evolution in longitudinal studies.}
\Regseg{} can be seen as a particular case of atlas-based segmentation-registration methods,
replacing the atlas by the structural image of the subject (\emph{structure-informed segmentation}).
The main difference between atlas-based segmentation and the application at hand is the resolution of the
target image.
Atlas-based segmentation is typically applied on structural and high-resolution images.
A comprehensive review of joint segmentation and registration methods applied in atlas-based
segmentation is found in \citep{gorthi_active_2011}.
They also propose a multiphase level-set function initialized from a labeled atlas to implement
the active contours that drive the atlas registration.
Alternatively, \regseg{} implements the active contours with a hierarchical set of explicit
surfaces (triangular meshes) instead of the multiphase level sets, and registration
is driven by shape-gradients \citep{herbulot_segmentation_2006}.
As an advantage, the use of explicit surfaces enables segmenting \gls*{dmri} images
with accuracy below \revcomment[R\#3-C10]{voxel} size.
An important antecedent of \regseg{} is \emph{bbregister} \citep{greve_accurate_2009}.
The tool has been widely adopted as the standard registration method to be used along with the \gls*{epi}
correction of choice.
It implements a linear mapping and uses 3D active contours \emph{with edges} to
search for intensity boundaries in the \lowb{} image.
The active contours are initialized using surfaces extracted from the \gls*{t1} using
\emph{FreeSurfer} \citep{fischl_freesurfer_2012}.
To overcome the problem of nonlinear distortions, \emph{bbregister} excludes from the
boundary search those regions that are typically warped.
Indeed, the distortion must be addressed separately because it is not supported by
the affine transformation model.
Conversely, the deformation model of \regseg{} is nonlinear and the active contours are
\emph{without edges} \citep{chan_active_2001} since the \gls*{fa} and \gls*{adc} maps
do not present steep image gradients (edges) but the anatomy can be identified
by looking for piece-wise smooth homogeneous regions.
Recently, \cite{guyader_combined_2011} proposed a simultaneous segmentation and
registration method in 2D using level sets and a nonlinear elasticity smoother on the
displacement vector field, which preserves the topology even with very large deformations.
\Regseg{} includes an anisotropic regularizer for the displacement field described by
\cite{nagel_investigation_1986}.
This regularization strategy conceptually falls in the midway between the Gaussian smoothing
generally included in most of the existing methodologies, and the complexity of
the elasticity smoother of \cite{guyader_combined_2011}.
Other minor features that differ from current methods in joint segmentation and registration are
the support of multivariate target-images and the efficient computation of the shape-gradients
implemented with sparse matrices.
We verified that precise segmentation and registration of a set of surfaces into multivariate
data is possible on digital phantoms.
We randomly deformed four different phantom models to mimic three homogeneous regions
(\gls*{wm}, \gls*{gm}, and \acrlong*{csf}) and we used them to simulate \gls*{t1}
and \gls*{t2} images at two resolution levels.
We measured the Hausdorff distance between the contours projected using the
ground-truth warping and the estimations found with \regseg{}.
We concluded that the errors were significantly lower than the voxel size.
We also assessed the 95\% \gls*{ci}, which yielded an aggregate interval of
0.64--0.66 [mm] for the low resolution phantoms (2.0 mm isotropic voxel) and
0.34--0.38 [mm] for the high resolution phantoms (1.0 mm isotropic).
Therefore, the error was bounded above by half of the voxel size.
The distributions of errors along surfaces varied importantly depending on the shape of the
phantom (see \autoref{fig:regseg-phantom}B).
The misregistration error of the ``gyrus'' phantom showed a much lower spread than that
for the other shapes.
We argue that the symmetry of those other shapes posed difficulties in driving the contours
towards the appropriate region due to \emph{sliding} displacements between the
surfaces and their ground-truth position.
The effect is not detectable by the active contours framework, but it is controllable
increasing the regularization constraints.
When \regseg{} is applied on real datasets, this surface sliding is negligible for the
convoluted nature of cortical surfaces and the directional restriction of the
distortion.
We evaluated \regseg{} in a real environment using the experimental framework presented
in \autoref{fig:regseg-evworkflows}.
We processed 16 subjects from the \gls*{hcp} database using both \regseg{}
and an in-house replication of the \acrfull*{t2b} method.
\Regseg{} obtained a high accuracy, with an aggregate 95\% \gls*{ci} of 0.56--0.66 [mm], which was
below the \revcomment[R\#3-C10]{voxel} size of 1.25 mm.
The misregistration error that remained after \regseg{} was significantly lower ($p <$ 0.01) than the
error corresponding to the \gls*{t2b} method according to Kruskal-Wallis H-tests
(\autoref{tab:results_real}).
Visual inspections of all the results \citepalias[section S5]{esteban_useful_2016} and the violin plots in
\autoref{fig:regseg-results_real} confirmed that \regseg{} achieved higher accuracy
than the \gls*{t2b} method in our settings.
We carefully configured the \gls*{t2b} method using the same algorithm and the
same settings employed in a widely-used tool for \gls*{dmri} processing.
However, cross-comparison experiments are prone to the so-called \emph{instrumentation bias}
\citep{tustison_instrumentation_2013}.
Therefore, these results did not prove that \regseg{} \emph{is better than} \gls*{t2b},
but indicated that \regseg{} is a reliable option in this application field.
Finally, we also proposed a piecewise-smooth segmentation model defined by
a selection of nested surfaces to partition the multispectral space
comprehending the \gls*{fa} and the \gls*{adc} maps and ultimately identify anatomical
structures in \gls*{dmri} space.
We also demonstrated the smoothness of the objective function on five of the real datasets
\citepalias[figure S2]{esteban_useful_2016}, taking advantage of the directional
restriction of possible distortions.
However, \regseg{} requires densely sampled surfaces to ensure the convergence.
Using the digital phantoms, we severely decimated the surfaces by a large factor.
These surfaces introduced a bias which displaced the zero of the gradients from the
minimum of the objective function impeding the convergence.
The proposed application of the method in the task of identifying structural information
in \gls*{dmri} images is an active field of research \citep{jeurissen_tissuetype_2015}.
Current processing of \gls*{dmri} involved in the connectome extraction and other applications
(such as \gls*{tbss} or surgical planning) requires a precise segmentation
of the anatomical structures in the diffusion space.
Some examples of these processing tasks are the structure-informed reconstruction of \gls*{dmri}
data \citep{jeurissen_multitissue_2014,daducci_accelerated_2015}, the anatomically constrained
tractography \citep{smith_anatomicallyconstrained_2012}, and the imposition of the cortical
parcellation mapped from the \gls*{t1} image \citep{hagmann_mapping_2008}.
The problem was firstly addressed using image segmentation approaches in the native diffusion
space, without definite and compelling results.
With the introduction of retrospective correction methods for the \emph{\gls*{epi} distortions}
and image registration approaches, the task has been typically solved in a two-step approach.
First, the \glspl*{dwi} are corrected for \emph{\gls*{epi} distortions} by estimating
the nonlinear deformation field from extra MR acquisitions
\citep{jezzard_correction_1995,chiou_simple_2000,cordes_geometric_2000,kybic_unwarping_2000}.
Second, mapping the structural information from the corresponding \gls*{t1} image
using a linear registration tool like \emph{bbregister} \citep{greve_accurate_2009}.
The current activity on improving correction methods \citep{irfanoglu_drbuddi_2015} and
the comeback of segmentation of \gls*{dmri} in its native space
\citep{jeurissen_tissuetype_2015} prove the open interest in this application.
\Regseg{} addresses this joint problem in a single step and it does not require any additional
acquisition other than the minimal protocol comprehending only \gls*{t1} and \gls*{dmri} images.
This situation is commonly found in historical datasets.
We envision \regseg{} to be integrated in diffusion processing pipelines, after a
\revcomment[R\#3-C10]{preliminary}
\gls*{dti} computation and before anatomically-informed reconstruction and tractography
methods.
Since the structural information is projected into the native space of \gls*{dmri},
these two processes and the matrix building task can be performed on the unaltered
\gls*{dmri} signal (i.e. without resampling data to an undistorted space).
For analyses other than connectivity, like \gls*{tbss}, the deformation estimated by \regseg{}
can be used to map the tracts into structural space.
\revcomment[R\#3-C5]{%
Even though we apply \regseg{} to the problem of susceptibility distortion,
\emph{it is not a distortion correction method}, but rather a surface alignment method.
In fact, the distortions are not corrected in the \gls*{epi} data.}
\revcomment[R\#3-C6]{%
Therefore, we suggest here to perform the reconstruction and tractography processes in the
original (distorted) diffusion data.}
\revcomment[R\#1-C3]{%
\Regseg{} allows to avoid resampling and/or unwarping of the diffusion signal because the
structural information necessary in the diffusion analysis is mapped from the
reference space.}
\revcomment[R\#1-C1]{%
Certain applications (like \gls*{tbss}) and methodologies (like building the connectivity matrix
by clustering the tracks) may not be performed correctly on the native (distorted) diffusion space
because they still need a mapping to the undistorted space.
Using \regseg{}, the tracks obtained in native space can be unwarped using the resulting estimation
of the deformation field.}
\revcomment[R\#3-C6]{%
This methodological variation will be further investigated, to ensure which processing design
yields the most accurate tractography results.}
Beyond the presented application on \gls*{dmri} data, \regseg{} can be indicated in situations
where there are precise surfaces delineating the structure, a target multivariate
image in which the surfaces must be fitted, and the mapping between the surfaces and
the volume encodes relevant physiological information, such as the normal/abnormal
development or the macroscopic dynamics of organs and tissues.
For instance, \regseg{} may be applied in fields like neonatal brain image segmentation
in longitudinal MRI studies of the early developmental patterns \citep{shi_neonatal_2010}.
In these studies, the surfaces obtained in a mature time point of the brain are retrospectively
propagated to the initial time points, regardless of the changes in the contrast and spatial
development between them.
More generally, \regseg{} may also be applied to the personalized study of longitudinal alteration
of the brain using multispectral images, for instance in the case of traumatic brain
injury \citep{irimia_structural_2014} or in monitoring brain tumors
\citep{weizman_semiautomatic_2014}.
| {
"alphanum_fraction": 0.8011036841,
"author": null,
"avg_line_length": 69.1720430108,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "4035875e086d554f9b1917e64b2c771d426dd992",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "434aba23a032a373b287fe72939cbfe4a6caedca",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "oesteban/RegSeg-NeuroImage2016",
"max_forks_repo_path": "2015-NeuroImage/06-discussion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "434aba23a032a373b287fe72939cbfe4a6caedca",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "oesteban/RegSeg-NeuroImage2016",
"max_issues_repo_path": "2015-NeuroImage/06-discussion.tex",
"max_line_length": 108,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "434aba23a032a373b287fe72939cbfe4a6caedca",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "oesteban/RegSeg-NeuroImage2016",
"max_stars_repo_path": "2015-NeuroImage/06-discussion.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3083,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 12866
} |
""" Module for data loading and manipulation.
"""
import os
import warnings
from typing import Any
from typing import Dict
from typing import Union
import numpy as np
import pandas as pd
from numpy.lib.stride_tricks import sliding_window_view
from .utils import check_ndim
__all__ = [
'load_data_3d',
'load_data',
'temporalize',
'detemporalize',
'sample_normal',
'sample_bernoulli'
]
def load_data_3d(
    path: str,
    *,
    timesteps: int,
    format: Union[Dict[str, Any], None] = None,
    verbose: bool = True
) -> np.ndarray:
    """Loads data from `path` and temporalizes it with `timesteps`.

    Args:
        path (str): Path to file.
        timesteps (int): Window size.
        format (Dict[str, Any]): Format args for pd.read_csv (optional).
        verbose (bool): Whether to print status information.

    Returns:
        np.ndarray: Returns the data.

    Example:
        >>> # Loading 2d data and reshaping it to 3d
        >>> X_train = load_data_3d(path='./data/train.csv', timesteps=10)
        >>> X_train.shape
        (100, 10, 18)
    """
    # `format=None` replaces the previous mutable `{}` default: a shared
    # dict default can be mutated across calls and leak state.
    df = load_data(path=path, verbose=verbose, **(format or {}))
    return temporalize(data=df, timesteps=timesteps, verbose=verbose)
def load_data(path: str, *, verbose: bool = True, **kwargs) -> pd.DataFrame:
    """Loads a csv file from `path`.

    Args:
        path (str): Path to csv.
        verbose (bool): Whether to print status information.
        **kwargs: Keyword arguments passed through to pd.read_csv.

    Returns:
        pd.DataFrame: Returns the loaded data.

    Example:
        >>> # Loading data from a csv file with a custom separator
        >>> data = load_data('./data/train.csv', sep=',')
        Loaded train.csv with 1000 rows and 18 columns.
    """
    df = pd.read_csv(path, **kwargs)
    if verbose:
        # Report only the file name, not the full path.
        _, name = os.path.split(path)
        rows, cols = df.shape
        print(f'Loaded {name} with {rows} rows and {cols} columns.')
    return df
def temporalize(
    data: Union[pd.DataFrame, np.ndarray],
    *,
    timesteps: int,
    stride: int = 0,
    verbose: bool = False
) -> np.ndarray:
    """Transforms a 2 dimensional array (rows, features) into a 3 dimensional
    array of shape (new_rows, timesteps, features). The step size along axis 0
    can be set with ``stride``. If ``stride=0`` or ``stride=timesteps``, the
    operation is equivalent to ``data.reshape(-1, timesteps, features)``.

    Note: if rows % timesteps != 0 some rows might be discarded.

    Arguments:
        data (pd.DataFrame, np.ndarray): Data to transform.
        timesteps (int): Number of timesteps.
        stride (int): Step size along the first axis (Default: 0).
        verbose (bool): Whether to print status information.

    Raises:
        ValueError: If ``timesteps`` < 1 or ``stride`` < 0.

    Returns:
        np.ndarray: Returns an array of shape rows x timesteps x features.

    Example:
        >>> import numpy as np
        >>> import mlnext
        >>> # setup data
        >>> i, j = np.ogrid[:6, :3]
        >>> data = 10 * i + j
        >>> print(data)
        [[ 0  1  2]
        [10 11 12]
        [20 21 22]
        [30 31 32]
        [40 41 42]
        [50 51 52]]
        >>> # Transform 2d data into 3d
        >>> mlnext.temporalize(data=data, timesteps=2, verbose=True)
        Old shape: (6, 3). New shape: (3, 2, 3).
        [[[ 0  1  2]
        [10 11 12]]
        [[20 21 22]
        [30 31 32]]
        [[40 41 42]
        [50 51 52]]]
        >>> # Transform 2d into 3d with stride=1
        >>> mlnext.temporalize(data, timesteps=3, stride=1, verbose=True)
        Old shape: (6, 3). New shape: (4, 3, 3).
        [[[ 0  1  2]
        [10 11 12]
        [20 21 22]]
        [[10 11 12]
        [20 21 22]
        [30 31 32]]
        [[20 21 22]
        [30 31 32]
        [40 41 42]]
        [[30 31 32]
        [40 41 42]
        [50 51 52]]]
    """
    data = np.array(data)
    old_shape = data.shape
    check_ndim(data, ndim=2)
    if timesteps < 1:
        # Message fixed: the check allows timesteps == 1.
        raise ValueError('Timesteps must be greater than 0.')
    if stride < 0:
        # Message fixed: the check allows stride == 0.
        raise ValueError('Stride must be non-negative.')
    if stride > timesteps:
        warnings.warn(
            f'Reversion with mlnext.detemporalize will result in a loss of '
            f'rows (stride: {stride} larger than timesteps: {timesteps}).')
    # stride = 0 and stride=timesteps is the same as a simple reshape
    # to (rows, timesteps, features) (stride=0 is replaced by timesteps)
    stride = stride or timesteps
    # sliding view with stride
    data = sliding_window_view(
        data,
        window_shape=(timesteps, data.shape[-1]),
    ).squeeze(axis=1)[::stride]
    if verbose:
        print(f'Old shape: {old_shape}. New shape: {data.shape}.')
    return data
def detemporalize(
    data: np.ndarray,
    *,
    stride: int = 0,
    last_point_only: bool = False,
    verbose: bool = False
) -> np.ndarray:
    """
    Transforms a 3 dimensional array (rows, timesteps, features) into a 2
    dimensional array (new_rows, features). If ``stride`` >= timesteps
    or 0, then the operation is equivalent to ``data.reshape(-1, features)``
    and new_rows equals rows * timesteps. If 0 < ``stride`` < timesteps, the
    stride induced elements will be removed and new_rows equals
    (rows - 1) * stride + timesteps. If ``last_point_only=True`` then only
    the last point in each window is kept and the result has shape
    (rows, features).

    Arguments:
        data (np.ndarray): Array to transform.
        stride (int): Stride that was used to transform the array from
          2d into 3d.
        last_point_only (bool): Whether to only take the last point of
          each window.
        verbose (bool): Whether to print old and new shape.

    Returns:
        np.ndarray: Returns an array of shape (rows * timesteps) x features.

    Example:
        >>> import numpy as np
        >>> import mlnext
        >>> # setup data
        >>> i, j = np.ogrid[:6, :3]
        >>> data = 10 * i + j
        >>> print(data)
        [[ 0  1  2]
        [10 11 12]
        [20 21 22]
        [30 31 32]
        [40 41 42]
        [50 51 52]]
        >>> # Transform 3d data into 2d
        >>> data_3d = mlnext.temporalize(data, timesteps=2)
        >>> print(data_3d)
        [[[ 0  1  2]
        [10 11 12]]
        [[20 21 22]
        [30 31 32]]
        [[40 41 42]
        [50 51 52]]]
        >>> mlnext.detemporalize(data_3d, verbose=True)
        Old shape: (3, 2, 3). New shape: (6, 3).
        [[ 0  1  2]
        [10 11 12]
        [20 21 22]
        [30 31 32]
        [40 41 42]
        [50 51 52]]
        >>> # Transform 3d data into 2d with stride=1
        >>> data_3d = mlnext.temporalize(data,
        ... timesteps=3, stride=1, verbose=True)
        Old shape: (6, 3). New shape: (4, 3, 3).
        >>> print(data_3d)
        [[[ 0  1  2]
        [10 11 12]
        [20 21 22]]
        [[10 11 12]
        [20 21 22]
        [30 31 32]]
        [[20 21 22]
        [30 31 32]
        [40 41 42]]
        [[30 31 32]
        [40 41 42]
        [50 51 52]]]
        >>> mlnext.detemporalize(data_3d, stride=1, verbose=True)
        Old shape: (4, 3, 3). New shape: (6, 3).
        [[ 0  1  2]
        [10 11 12]
        [20 21 22]
        [30 31 32]
        [40 41 42]
        [50 51 52]]
        >>> # Take only the last point from each window
        >>> mlnext.detemporalize(data_3d, last_point_only=True, verbose=True)
        Old shape: (4, 3, 3). New shape: (4, 3).
        [[20 21 22]
        [30 31 32]
        [40 41 42]
        [50 51 52]]
    """
    data = np.array(data)
    if data.ndim < 3:
        # nothing to do
        return data
    # guard against > 3 dimensions (ndim < 3 already returned above)
    check_ndim(data, ndim=3)
    rows, timesteps, features = data.shape  # (rows, timesteps, features)
    if stride < 0:
        raise ValueError('Stride must be greater than 0.')
    if last_point_only:
        # take only the last point in each window
        s = slice(timesteps - 1, None, timesteps)
        data = data.reshape(-1, features)[s]
    else:
        # remove stride
        # a stride outside (0, timesteps) behaves like a plain reshape
        step = stride if stride > 0 and stride < timesteps else timesteps
        # extract the last window, we need all of it
        lw = data[-1]
        # take the first `step`-values of each window
        data = data[:-1, :step, :].reshape(-1, features)
        # concat along axis 0
        data = np.r_[data, lw]
    if verbose:
        print(f'Old shape: {(rows, timesteps, features)}. '
              f'New shape: {data.shape}.')
    return data
def sample_normal(*, mean: np.ndarray, std: np.ndarray) -> np.ndarray:
    """Draws samples from a normal (Gaussian) distribution.

    Args:
        mean (np.ndarray): Mean of the normal distribution.
        std (np.ndarray): Standard deviation of the normal distribution.

    Returns:
        np.ndarray: Returns the drawn samples.

    Example:
        >>> # Sample from a normal distribution with mean and standard dev.
        >>> sample_normal(mean=[0.1], std=[1])
        array([-0.77506174])
    """
    # Positional loc/scale call; one sample per (mean, std) pair.
    return np.random.normal(mean, std)
def sample_bernoulli(mean: np.ndarray) -> np.ndarray:
    """Draws samples from a Bernoulli distribution with success probability
    `mean`.

    Args:
        mean (np.ndarray): Mean of the bernoulli distribution.

    Returns:
        np.ndarray: Returns the drawn samples.

    Example:
        >>> # Sample from a bernoulli distribution with mean
        >>> sample_bernoulli(mean=0.2)
        0
    """
    # A Bernoulli trial is a binomial draw with a single trial (n=1).
    return np.random.binomial(1, mean)
| {
"alphanum_fraction": 0.5588607595,
"author": null,
"avg_line_length": 28.2985074627,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "709d68e63874faa9ea79a598ea95e5d65b9d8a9a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "aff791ace391e46c7cee12e5901090551d7c2103",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "PLCnext/MLnext-Framework",
"max_forks_repo_path": "mlnext/data.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "aff791ace391e46c7cee12e5901090551d7c2103",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "PLCnext/MLnext-Framework",
"max_issues_repo_path": "mlnext/data.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "aff791ace391e46c7cee12e5901090551d7c2103",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "PLCnext/MLnext-Framework",
"max_stars_repo_path": "mlnext/data.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2705,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9480
} |
# -*- coding: utf-8 -*-
import logging
import os
from collections import Counter
from multiprocessing.dummy import Pool as ThreadPool
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from src.globalVariable import GlobalVariable
# Render floats with 3 significant digits when printing DataFrames.
pd.options.display.float_format = '{0:.3}'.format
class PreferenceAnalytics:
    """Computes global relevance statistics of users and songs from a
    preference dataframe and renders relevance-score histograms.

    An entity (user or song) is flagged as globally relevant when its
    ``total_liked`` count is greater than or equal to the standard deviation
    of all counts.
    """

    def __init__(self, users_preferences_df, song_df):
        """
        Args:
            users_preferences_df: DataFrame with 'user_id', 'song_id' and
                'play_count' columns.
            song_df: DataFrame with song metadata ('album', 'artist', ...).
        """
        self.__logger = logging.getLogger(__name__)
        self.__users_preferences_df = users_preferences_df
        self.__songs_relevance_df = pd.DataFrame()
        self.__users_relevance_df = pd.DataFrame()
        self.__song_df = song_df
        # Statistics filled in by *_relevance_with_global_like_std().
        self.__songs_std_value = 0.0
        self.__songs_max_value = 0.0
        self.__songs_min_value = 0.0
        self.__users_std_value = 0.0
        self.__users_max_value = 0.0
        self.__users_min_value = 0.0
        self.__songs_mean_value = 0.0
        self.__users_mean_value = 0.0
        self.__songs_median_value = 0.0
        self.__users_median_value = 0.0
        self.__path_to_save_graphics = 'results/'
        if not os.path.exists(self.__path_to_save_graphics):
            os.makedirs(self.__path_to_save_graphics)

    # Users Methods
    def _user_calc(self, users_df):
        """Flags each user as globally relevant (total_liked >= users std)."""
        for index, row in users_df.iterrows():
            users_df.at[index, 'global_relevance'] = True if row['total_liked'] >= self.__users_std_value else False
        return users_df

    def users_make_global_relevance(self, users_count_df):
        """Computes the user relevance flags in parallel thread chunks."""
        self.__logger.info("__ Begin: users_make_global_relevance")
        pool = ThreadPool(GlobalVariable.processor_number)
        users_relevance_df = pool.map(self._user_calc, np.array_split(users_count_df, GlobalVariable.processor_number))
        pool.close()
        pool.join()
        self.__logger.info("__ End: users_make_global_relevance")
        return pd.concat(users_relevance_df, sort=False)

    def __user_preference_count(self):
        """Counts how many songs each user liked -> (user_id, total_liked)."""
        value_counts = self.__users_preferences_df['user_id'].value_counts()
        resp = value_counts.rename_axis('user_id').reset_index(name='total_liked')
        return resp

    def user_relevance_with_global_like_std(self):
        """Computes user statistics and the user relevance dataframe."""
        users_count_df = self.__user_preference_count()
        self.__logger.info("__ Begin: user_relevance_with_global_like_std")
        self.__users_std_value = users_count_df["total_liked"].std()
        self.__users_max_value = users_count_df['total_liked'].max()
        self.__users_min_value = users_count_df['total_liked'].min()
        self.__users_mean_value = users_count_df['total_liked'].mean()
        self.__users_median_value = users_count_df['total_liked'].median()
        # Normalize scores by the maximum count.
        users_count_df['global_relevance_score'] = (users_count_df['total_liked'] / self.__users_max_value).values
        self.__users_relevance_df = self.users_make_global_relevance(users_count_df)
        self.__logger.info("__ End: user_relevance_with_global_like_std")

    # Song Methods
    def _song_calc(self, songs_df):
        """Flags each song as globally relevant (total_liked >= songs std)."""
        for index, row in songs_df.iterrows():
            songs_df.at[index, 'global_relevance'] = True if row['total_liked'] >= self.__songs_std_value else False
        return songs_df

    def songs_make_global_relevance(self, songs_count_df):
        """Computes the song relevance flags in parallel thread chunks."""
        self.__logger.info("__ Begin: songs_make_global_relevance")
        pool = ThreadPool(GlobalVariable.processor_number)
        songs_relevance_df = pool.map(self._song_calc, np.array_split(songs_count_df, GlobalVariable.processor_number))
        pool.close()
        pool.join()
        self.__logger.info("__ End: songs_make_global_relevance")
        return pd.concat(songs_relevance_df, sort=False)

    @staticmethod
    def sum_play_counts(df):
        """Sums the play counts of a per-song dataframe."""
        return pd.DataFrame(data=[df['play_count'].sum()], columns=['total_liked'], index=[df.loc[0, 'song_id']])

    def __song_preference_count(self):
        """Counts how many users liked each song -> (song_id, total_liked)."""
        value_counts = self.__users_preferences_df['song_id'].value_counts()
        resp = value_counts.rename_axis('song_id').reset_index(name='total_liked')
        return resp

    def song_relevance_with_global_like_std(self):
        """Computes song statistics and the song relevance dataframe."""
        songs_count_df = self.__song_preference_count()
        self.__logger.info("__ Begin: song_relevance_with_global_like_std")
        self.__songs_std_value = songs_count_df["total_liked"].std()
        self.__songs_max_value = songs_count_df['total_liked'].max()
        self.__songs_min_value = songs_count_df['total_liked'].min()
        self.__songs_mean_value = songs_count_df['total_liked'].mean()
        self.__songs_median_value = songs_count_df['total_liked'].median()
        # Normalize scores by the maximum count.
        songs_count_df['global_relevance_score'] = (songs_count_df['total_liked'] / self.__songs_max_value).values
        self.__songs_relevance_df = self.songs_make_global_relevance(songs_count_df)
        # Index by song_id but keep the column available as well.
        self.__songs_relevance_df = self.__songs_relevance_df.set_index('song_id')
        self.__songs_relevance_df['song_id'] = self.__songs_relevance_df.index.values.tolist()
        self.__logger.info("__ End: song_relevance_with_global_like_std")

    # callers
    def run(self):
        """Computes both song and user relevance."""
        self.song_relevance_with_global_like_std()
        self.user_relevance_with_global_like_std()

    def get_users_relevance_preferences_df(self, user_top_n_relevance):
        """Returns the preferences of the `user_top_n_relevance` users with
        the highest global relevance score.

        Bug fix: the result of sort_values was previously discarded
        (sort_values returns a new frame), so the head of the *unsorted*
        frame was taken instead of the top scorers.
        """
        sorted_users_df = self.__users_relevance_df.sort_values(
            "global_relevance_score", ascending=False)
        relevance_users = sorted_users_df[:user_top_n_relevance]
        users_relevance_preferences_df = self.__users_preferences_df[
            self.__users_preferences_df['user_id'].isin(relevance_users['user_id'].tolist())]
        return users_relevance_preferences_df

    def get_song_relevance_df(self):
        """Returns the computed song relevance dataframe."""
        return self.__songs_relevance_df

    def print_song_statistical(self):
        """Prints the song statistics to stdout."""
        print('')
        print('+ + Total de músicas: ' + str(self.__songs_relevance_df.song_id.size))
        print('+ + Total de Reproduções: ' + str(self.__users_preferences_df['play_count'].sum()))
        print('+ + Música mais preferida: ' + str(self.__songs_max_value))
        print('+ + Música menos preferida: ' + str(self.__songs_min_value))
        print('+ + Desvio Padrão das preferencias: ' + str(self.__songs_std_value))
        print('- - Desvio Padrão normalizado das preferencias: ' + str(self.__songs_std_value / self.__songs_max_value))
        print('+ + Media das preferencias: ' + str(self.__songs_mean_value))
        print('+ + Mediana das preferencias: ' + str(self.__songs_median_value))
        counted = Counter(self.__songs_relevance_df['global_relevance'].tolist())
        print('+ + Relevância musical: ' + str(counted))
        print('# # Total de Albuns: ' + str(self.__song_df['album'].nunique()))
        print('# # Total de Artists: ' + str(self.__song_df['artist'].nunique()))

    def print_user_statistical(self):
        """Prints the user statistics to stdout."""
        print('')
        print('- -Total de usuários: ' + str(self.__users_relevance_df.user_id.size))
        print('- - Usuário com mais músicas preferidas: ' + str(self.__users_max_value))
        print('- - Usuário com menos músicas preferidas: ' + str(self.__users_min_value))
        print('- - Desvio Padrão das preferencias: ' + str(self.__users_std_value))
        print('- - Desvio Padrão normalizado das preferencias: ' + str(self.__users_std_value / self.__users_max_value))
        print('+ + Media das preferencias: ' + str(self.__users_mean_value))
        print('+ + Mediana das preferencias: ' + str(self.__users_median_value))
        counted = Counter(self.__users_relevance_df['global_relevance'].tolist())
        print('- - Usuários Relevantes: ' + str(counted))
        print('')

    def song_global_relevance_score_histo(self):
        """Saves a histogram of the song relevance scores as a png."""
        x = self.__songs_relevance_df.sort_values(by=['global_relevance_score'])
        plt.figure()
        data = x['global_relevance_score'].values.tolist()
        plt.hist(data, bins=100, alpha=0.5,
                 histtype='bar', color='steelblue',
                 edgecolor='black')
        plt.xlabel('Number of times a song was added in users preference')
        plt.ylabel('Song amount')
        plt.grid(axis='y')
        plt.savefig(
            self.__path_to_save_graphics
            + 'song_global_relevance_score_histo.png', format='png', dpi=300
        )
        plt.close()

    def user_global_relevance_score_histo(self):
        """Saves a histogram of the user relevance scores as a png."""
        x = self.__users_relevance_df.sort_values(by=['global_relevance_score'])
        plt.figure()
        plt.xlabel('Number of songs in users preference')
        plt.ylabel('User amount')
        data = x['global_relevance_score'].values.tolist()
        plt.hist(data, bins=100, alpha=0.5,
                 histtype='bar', color='steelblue',
                 edgecolor='black')
        plt.grid(axis='y')
        plt.savefig(
            self.__path_to_save_graphics
            + 'user_global_relevance_score_histo.png', format='png', dpi=300
        )
        plt.close()

    def make_graphics(self):
        """Renders both histograms."""
        self.song_global_relevance_score_histo()
        self.user_global_relevance_score_histo()
| {
"alphanum_fraction": 0.6925047333,
"author": null,
"avg_line_length": 48.5351351351,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fa1ae466ef7b9b8fb126a94175e035244e614d8a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "327371fed56008a59b3319d6f3826578a95163db",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DiegoCorrea/machine_recommender",
"max_forks_repo_path": "src/preprocessing/preferences_analytics.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "327371fed56008a59b3319d6f3826578a95163db",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DiegoCorrea/machine_recommender",
"max_issues_repo_path": "src/preprocessing/preferences_analytics.py",
"max_line_length": 120,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "327371fed56008a59b3319d6f3826578a95163db",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DiegoCorrea/machine_recommender",
"max_stars_repo_path": "src/preprocessing/preferences_analytics.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2071,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8979
} |
import threading
import sys, os
import ctypes
from collections import OrderedDict
import logging
import importlib
import pprint
from pathlib import Path
import numpy as np
from qtpy import QtGui, QtWidgets, QtCore
from qtpy.QtCore import Qt
from .. import config, gui, __release__, PROGNAME, DOC_HTML
from ..core import conf
from ..console import restart
from .about import AboutScreen
from ..panels.base import BasePanel
from ..ezdock.laystruct import LayoutStruct
from ..dicttree.widgets import DictionaryTreeDialog
# Module-level logger and resource root (icons, license text) for the dialogs.
logger = logging.getLogger(__name__)
respath = Path(config['respath'])
class NewPanelMenu(QtWidgets.QMenu):
    """Menu listing all user-visible panel classes grouped by category.

    The content is rebuilt on every show, so newly registered panel classes
    appear without restarting the application.
    """

    def __init__(self, parent=None, showIcon=False):
        super().__init__('New', parent)
        if showIcon:
            self.setIcon(QtGui.QIcon(str(respath / 'icons' / 'px16' / 'application_add.png')))

    @property
    def panels(self):
        """Panel registry of the running application."""
        return QtWidgets.QApplication.instance().panels

    def showEvent(self, event):
        # Remember the cursor position at show time; passed along so a new
        # panel can be placed near it.
        self.showpos = QtGui.QCursor().pos()
        self.initactions()

    def initactions(self):
        """Rebuilds the menu actions from the registered panel classes."""
        self.clear()
        panelClasses = BasePanel.userPanelClasses()
        # Keep strong references to the QActions; without them the actions
        # would be garbage collected and vanish from the menu.
        self.liveActions = []
        for category, catPanelClasses in panelClasses.items():
            catMenu = QtWidgets.QMenu(category)
            self.addMenu(catMenu)
            for panelClass in catPanelClasses:
                if not panelClass.userVisible:
                    continue
                # Replaces a bare `except:`: only a missing attribute should
                # fall back to the placeholder (typo 'unkown' fixed too).
                panelShortName = getattr(panelClass, 'panelShortName', 'unknown')
                action = QtWidgets.QAction(f'{panelShortName} <{panelClass.__qualname__}>')
                if hasattr(panelClass, 'classIconFile'):
                    action.setIcon(QtGui.QIcon(panelClass.classIconFile))
                action.triggered.connect(CachedArgCall(self.newPanel, panelClass, self.parent().windowName, self.showpos))
                catMenu.addAction(action)
                self.liveActions.append(action)

    def newPanel(self, panelClass, windowName, showpos=None):
        """Creates a new panel of `panelClass` in the window `windowName`."""
        if panelClass.panelCategory == 'plot':
            # Plot panels are created through the pylab figure machinery.
            import pylab
            pylab.figure()
        else:
            self.panels.new_panel(panelClass, windowName)
class ShowMenu(QtWidgets.QMenu):
    """Menu listing every open panel; the selected panel of each category is
    check-marked. Rebuilt each time the menu is shown."""
    def __init__(self, parent=None, showIcon=False):
        super().__init__('Panel', parent)
        if showIcon:
            self.setIcon(QtGui.QIcon(str(respath / 'icons' / 'px16' / 'application_get.png')))
    def showEvent(self, event):
        self.initactions()
    @property
    def panels(self):
        # Panel registry of the running application.
        return QtWidgets.QApplication.instance().panels
    def preview(self):
        """Shows floating thumbnail previews and activates the chosen panel."""
        self.previews = PanelsFloatingPreviews()
        self.previews.preview()
        self.previews.exec_()
        # exec_ returns after a thumbnail click sets selectedPanel.
        self.previews.selectedPanel.select()
        self.previews.selectedPanel.show_me()
    def initactions(self):
        """Rebuilds the menu: a previews entry plus one checkable action per panel."""
        self.clear()
        # Keep strong references to the QActions; without them the actions
        # would be garbage collected and vanish from the menu.
        self.liveActions = []
        action = QtWidgets.QAction(f'Previews...\t{config["shortcuts"]["panel preview"]}', triggered=self.preview)
        self.addAction(action)
        self.liveActions.append(action)
        self.addSeparator()
        for category in self.panels.keys():
            panels = self.panels[category]
            selected_panid = self.panels.selected(category, panel=False)
            for panid in sorted(panels.keys()):
                panel = self.panels[category][panid]
                action = QtWidgets.QAction(panel.windowTitle())
                action.triggered.connect(CachedArgCall(self.showPanel, panel))
                action.setCheckable(True)
                # Check-mark the currently selected panel of this category.
                if panel.panid == selected_panid:
                    action.setChecked(True)
                else:
                    action.setChecked(False)
                self.addAction(action)
                self.liveActions.append(action)
    def showPanel(self, panel):
        # Raise the panel and make it the selected one of its category.
        panel.show_me()
        panel.select()
class WindowMenu(QtWidgets.QMenu):
    """Menu listing all application windows; rebuilt each time it is shown."""
    def __init__(self, parent=None, showIcon=False):
        super().__init__('Window', parent)
        if showIcon:
            self.setIcon(QtGui.QIcon(str(respath / 'icons' / 'px16' / 'application_double.png')))
    @property
    def windows(self):
        # Window registry of the running application.
        return QtWidgets.QApplication.instance().windows
    def showEvent(self, event):
        self.initactions()
    def preview(self):
        """Shows floating thumbnail previews of all windows."""
        self.previews = WindowsFloatingPreviews()
        self.previews.preview()
        self.previews.exec_()
    def initactions(self):
        """Rebuilds the menu: a previews entry plus one action per window."""
        self.clear()
        # Keep strong references to the QActions; without them the actions
        # would be garbage collected and vanish from the menu.
        self.liveActions = []
        action = QtWidgets.QAction(f'Previews...\t{config["shortcuts"]["window preview"]}', triggered=self.preview)
        self.addAction(action)
        self.liveActions.append(action)
        self.addSeparator()
        for window_name in self.windows.keys():
            window = self.windows[window_name]
            action = QtWidgets.QAction(window_name)
            action.triggered.connect(CachedArgCall(self.showWindow, window))
            self.addAction(action)
            self.liveActions.append(action)
    def showWindow(self, window):
        # Bring the window to the front.
        window.show()
        window.raise_()
class CachedArgCall(object):
    """Callable that freezes a target and its arguments for a later call.

    Used to bind per-item arguments to Qt signal handlers while keeping a
    strong reference to them; calling the instance invokes the target with
    the stored positional and keyword arguments.
    """

    def __init__(self, caller, *args, **kwargs):
        # Capture everything up front; nothing is evaluated until __call__.
        self.caller, self.args, self.kwargs = caller, args, kwargs

    def __call__(self):
        # Invoke for the side effect; the target's return value is dropped.
        self.caller(*self.args, **self.kwargs)
class LayoutMenu(QtWidgets.QMenu):
    """Menu to save the current dock layout and restore layouts stored in
    the config."""
    def __init__(self, parent=None, showIcon=False):
        super().__init__('Layout', parent)
        if showIcon:
            self.setIcon(QtGui.QIcon(str(respath / 'icons' / 'px16' / 'layout_content.png')))
        self.initactions()
    @property
    def panels(self):
        # Panel registry of the running application.
        return QtWidgets.QApplication.instance().panels
    def showEvent(self, event):
        self.initactions()
    def initactions(self):
        """Rebuilds the menu: a save entry plus one action per stored layout."""
        self.clear()
        action = QtWidgets.QAction('Save Layout...', self, triggered=self.saveLayout)
        self.addAction(action)
        self.addSeparator()
        self.addLayoutActions(self)
    def addLayoutActions(self, parent=None):
        """Adds one restore action per config layout, labelled with its shortcut."""
        # Invert the shortcut mapping: layout name -> shortcut key.
        shortcuts = dict((v,k) for k,v in config['shortcuts']['layout'].items())
        prefix = config['shortcuts']['layout']['prefix']
        for name, layout in config['layout'].items():
            shortcut = shortcuts.get(name, None)
            if shortcut is None:
                action = QtWidgets.QAction(name, parent)
            else:
                action = QtWidgets.QAction(f'{name}\t{prefix}{shortcut}', parent)
            caller = self.panels.restore_state_from_config
            action.triggered.connect(CachedArgCall(caller, name))
            parent.addAction(action)
    def saveLayout(self):
        """Asks for a name and stores the current perspective in the config."""
        layout_name = gui.dialog.getstring('Give it a name')
        if layout_name == '': return
        # The 'base' layout is protected against overwriting.
        if layout_name == 'base':
            gui.dialog.msgbox(f"You can't overwrite {layout_name}", icon='warn')
        else:
            config['layout'][layout_name] = gui.qapp.panels.ezm.get_perspective()
class MainDialog(QtWidgets.QMainWindow):
    """Main application window: hosts the layout tab and the top-level menus
    (Application, New, Panel, Window, Layout, Config, Help)."""

    def __init__(self, panels):
        super().__init__()
        self.setWindowTitle(f'{PROGNAME} {__release__}')
        self.panels = panels
        self.tabs = QtWidgets.QTabWidget(self)
        self.setCentralWidget(self.tabs)
        self.panels_layout = PanelsLayout(self, panels)
        self.tabs.addTab(self.panels_layout, 'Layout')
        self.initMenu()
        self.callerWindow = None

    @property
    def qapp(self):
        """The running QApplication instance."""
        return QtWidgets.QApplication.instance()

    @property
    def windowName(self):
        # The main dialog is not a named gdesk window.
        return None

    def initMenu(self):
        """Builds the menu bar."""
        self.appMenu = self.menuBar().addMenu("&Application")
        act = QtWidgets.QAction("Restart", self,
            triggered=self.restart,
            statusTip=f"Restart {PROGNAME}",
            icon=QtGui.QIcon(str(respath / 'icons' / 'px16' / 'recycle.png')))
        self.appMenu.addAction(act)
        act = QtWidgets.QAction("Exit", self, shortcut=QtGui.QKeySequence.Quit,
            statusTip=f"Exit {PROGNAME}",
            triggered=self.qapp.quit,
            icon=QtGui.QIcon(str(respath / 'icons' / 'px16' / 'door_out.png')))
        self.appMenu.addAction(act)
        self.newMenu = NewPanelMenu(self)
        self.menuBar().addMenu(self.newMenu)
        self.showMenu = ShowMenu(self)
        self.menuBar().addMenu(self.showMenu)
        self.windowMenu = WindowMenu(self)
        self.menuBar().addMenu(self.windowMenu)
        self.layoutMenu = LayoutMenu(self)
        self.menuBar().addMenu(self.layoutMenu)
        self.configMenu = self.menuBar().addMenu("Config")
        self.configMenu.addAction(QtWidgets.QAction("View Config", self, triggered=self.showConfig,
            icon=QtGui.QIcon(str(respath / 'icons' / 'px16' / 'page_gear.png'))))
        self.configMenu.addAction(QtWidgets.QAction("Save Config", self, triggered=self.saveConfig))
        #matplotlib.rcsetup.all_backends
        #'module://gdesk.matplotbe'
        self.helpMenu = self.menuBar().addMenu("Help")
        helpAct = QtWidgets.QAction("&Help", self, triggered=self.help)
        helpAct.setIcon(QtGui.QIcon(str(respath / 'icons' / 'px16' / 'help.png')))
        self.helpMenu.addAction(helpAct)
        aboutGhQtAct = QtWidgets.QAction(f"About {PROGNAME}", self, triggered=self.about)
        self.helpMenu.addAction(aboutGhQtAct)
        self.helpMenu.addAction(QtWidgets.QAction("License", self, triggered=self.license))
        infoGhQtAct = QtWidgets.QAction("Instance Info", self, triggered=self.info)
        infoGhQtAct.setIcon(QtGui.QIcon(str(respath / 'icons' / 'px16' / 'information.png')))
        self.helpMenu.addAction(infoGhQtAct)
        aboutQtAct = QtWidgets.QAction("About Qt", self, triggered=self.qapp.aboutQt)
        self.helpMenu.addAction(aboutQtAct)

    def refresh(self):
        """Refreshes the layout tab."""
        self.panels_layout.refresh()

    def exec_(self, callerWindow=None):
        """Brings the dialog to the front (does not block like QDialog.exec_)."""
        self.callerWindow = callerWindow
        self.raise_()
        self.qapp.setActiveWindow(self)
        self.showNormal()

    def accept(self):
        self.showMinimized()

    def restart(self):
        """Restarts the application via the console restart hook."""
        restart()
        #os.execlp(sys.executable, 'python', '-m', 'gdesk')

    def showConfig(self):
        """Opens the configuration in an editable tree dialog."""
        dt = DictionaryTreeDialog(config)
        dt.edit()

    def saveConfig(self):
        """Asks for a target file and saves the configuration as json."""
        path = gui.putfilename('JSON (*.json)', file=config['save_config_file'])
        conf.save_config_json(path)

    def help(self):
        """Opens the HTML documentation in the system browser."""
        print("Opening %s" % DOC_HTML)
        os.system('start "help" "%s"' % DOC_HTML)

    def about(self):
        aboutScreen = AboutScreen()
        aboutScreen.exec_()

    def license(self):
        """Prints the license text to the console panel."""
        # Fixed resource leak: the license file was opened without being closed.
        with open(respath / 'LICENSE.txt', 'r') as fp:
            message = fp.read()
        print(message)
        self.qapp.panels['console'][0].show_me()

    def info(self):
        """Prints host/instance information to the console panel."""
        message = self.qapp.cmdserver.host_info()
        print(message)
        self.qapp.panels['console'][0].show_me()

    def closeEvent(self, event):
        """Quits only when every other window is hidden; otherwise minimizes."""
        allHidden = True
        for window in self.qapp.windows.values():
            if window.isVisible():
                allHidden = False
                break
        if allHidden:
            event.accept()
        else:
            self.showMinimized()
            self.callerWindow = None
            event.ignore()
class PanelsFloatingPreviews(QtWidgets.QDialog):
    """Frameless, stay-on-top dialog with a thumbnail grid of all panels.

    Clicking a thumbnail stores the panel in ``selectedPanel`` and hides the
    dialog; the caller activates the selection afterwards."""
    def __init__(self):
        super().__init__()
        self.thumbs = []
        vbox = QtWidgets.QVBoxLayout()
        self.setLayout(vbox)
        # Caption at double the default font size.
        font = self.font()
        font.setPointSize(font.pointSize() * 2)
        self.caption = QtWidgets.QLabel('Panels')
        self.caption.setFont(font)
        self.caption.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        vbox.addWidget(self.caption)
        self.boxlay = QtWidgets.QGridLayout()
        vbox.addLayout(self.boxlay)
        self.setWindowFlag(Qt.FramelessWindowHint)
        self.setWindowFlag(Qt.WindowStaysOnTopHint)
    @property
    def panels(self):
        # Panel registry of the running application.
        return QtWidgets.QApplication.instance().panels
    def preview(self):
        """Fills the grid with one thumbnail button per panel."""
        total = sum((len(pans) for cat, pans in self.panels.items()))
        # Choose the column count for a roughly 16:9 overall grid shape.
        colcount = int((total*16/9)**0.5)
        index = 0
        for cat in self.panels.keys():
            selectedId = self.panels.selected(cat, panel=False)
            for panid in sorted(self.panels[cat].keys()):
                panel = self.panels[cat][panid]
                pixmap = panel.grab().scaled(160, 160, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
                thumb = QtWidgets.QToolButton()
                thumb.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
                thumb.setIcon(QtGui.QIcon(pixmap))
                thumb.setIconSize(pixmap.rect().size())
                thumb.setText(panel.short_title)
                # The currently selected panel of the category appears pressed.
                if panel.panid == selectedId:
                    thumb.setDown(True)
                thumb.setToolTip(panel.long_title)
                thumb.clicked.connect(CachedArgCall(self.showPanel, panel))
                self.thumbs.append(thumb)
                self.boxlay.addWidget(thumb, index // colcount, index % colcount)
                index += 1
    def showPanel(self, panel):
        """Remembers the clicked panel and dismisses the preview dialog."""
        for thumb in self.thumbs:
            thumb.setParent(None)
            thumb.hide()
        self.thumbs = []
        self.hide()
        self.selectedPanel = panel
class WindowsFloatingPreviews(QtWidgets.QDialog):
    """Frameless, stay-on-top dialog with a thumbnail grid of all windows.

    Clicking a thumbnail raises the window and hides the dialog."""
    def __init__(self):
        super().__init__()
        self.thumbs = []
        self.setLayout(QtWidgets.QVBoxLayout())
        # Caption at double the default font size.
        font = self.font()
        font.setPointSize(font.pointSize() * 2)
        self.caption = QtWidgets.QLabel('Windows')
        self.caption.setFont(font)
        self.caption.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.layout().addWidget(self.caption)
        self.boxlay = QtWidgets.QGridLayout()
        self.layout().addLayout(self.boxlay)
        self.setWindowFlag(Qt.FramelessWindowHint)
        self.setWindowFlag(Qt.WindowStaysOnTopHint)
    @property
    def windows(self):
        # Window registry of the running application.
        return QtWidgets.QApplication.instance().windows
    def preview(self):
        """Fills the grid with one thumbnail button per window."""
        total = len(self.windows.items())
        # Choose the column count for a roughly 16:9 overall grid shape.
        colcount = int((total*16/9)**0.5)
        index = 0
        for window in self.windows.values():
            pixmap = window.grab().scaled(160, 160, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
            thumb = QtWidgets.QToolButton()
            thumb.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
            thumb.setIcon(QtGui.QIcon(pixmap))
            thumb.setIconSize(pixmap.rect().size())
            #thumb.setText(window.windowTitle())
            thumb.setText(window.name)
            thumb.setToolTip(window.windowTitle())
            thumb.clicked.connect(CachedArgCall(self.showWindow, window))
            self.thumbs.append(thumb)
            self.boxlay.addWidget(thumb, index // colcount, index % colcount)
            index += 1
        #self.show()
    def showWindow(self, window):
        """Raises the clicked window and dismisses the preview dialog."""
        window.showNormal()
        window.raise_()
        for thumb in self.thumbs:
            thumb.hide()
        self.thumbs = []
        self.hide()
class LayoutList(QtWidgets.QListWidget):
    """List widget of the saved layouts; selecting an entry renders a textual
    description of the layout into the preview pane."""
    def __init__(self, parent):
        super().__init__(parent=parent)
        self.currentItemChanged.connect(self.changeItem)
    def changeItem(self, item):
        """Describes the selected layout's windows/docks in the preview pane."""
        if item is None: return
        layout = config['layout'][item.name]
        text = ''
        ls = LayoutStruct()
        for window in layout["windows"]:
            text += f'window: {window["name"]}\n'
            ls.root = window["docks"]
            text += ls.describe() + '\n\n'
        # NOTE(review): after being placed in PanelsLayout's QSplitter,
        # parent() is presumably the splitter and parent().parent() the
        # PanelsLayout owning `preview` — confirm before refactoring.
        self.parent().parent().preview.setPlainText(text)
class PanelsLayout(QtWidgets.QWidget):
    """'Layout' tab of the main dialog: a list of saved layouts, a textual
    preview pane and a Load button."""
    def __init__(self, dialog, panels):
        super().__init__(parent=dialog)
        self.dialog = dialog
        self.panels = panels
        self.layout_list = LayoutList(self)
        self.preview = QtWidgets.QPlainTextEdit(self)
        # Monospaced font so the layout description aligns like the console.
        console_font = QtGui.QFont('Consolas', pointSize=config['console']['fontsize'])
        self.preview.setFont(console_font)
        self.preview.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.vbox = QtWidgets.QVBoxLayout()
        self.setLayout(self.vbox)
        self.box = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        self.vbox.addWidget(self.box)
        self.box.addWidget(self.layout_list)
        self.box.addWidget(self.preview)
        self.loadBtn = QtWidgets.QPushButton('Load')
        self.loadBtn.clicked.connect(self.loadLayout)
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.loadBtn)
        self.vbox.addLayout(hbox)
        self.refresh()
    def refresh(self):
        """Repopulates the list from the layouts stored in the config."""
        self.layout_list.clear()
        # Invert the shortcut mapping: layout name -> shortcut key.
        shortcuts = dict((v,k) for k,v in config['shortcuts']['layout'].items())
        for name, layout in config['layout'].items():
            description = layout.get('description', 'no description')
            if shortcuts.get(name, None):
                description += f"\n [Ctrl+F{shortcuts.get(name, None)}]"
            item = QtWidgets.QListWidgetItem(f'{name}:\n {description}')
            item.name = name
            self.layout_list.addItem(item)
    def loadLayout(self):
        """Restores the selected layout and minimizes the dialog."""
        item = self.layout_list.selectedItems()[0]
        self.panels.restore_state_from_config(item.name)
        self.dialog.accept()
| {
"alphanum_fraction": 0.6182681852,
"author": null,
"avg_line_length": 32.0605504587,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3fdd873cde56310b146096f6168360b9ac48326e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9cb63a65fe23e30e155b3beca862f369b7fa1b7e",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "thocoo/gamma-desk",
"max_forks_repo_path": "gdesk/dialogs/main.py",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "9cb63a65fe23e30e155b3beca862f369b7fa1b7e",
"max_issues_repo_issues_event_max_datetime": "2021-06-09T09:07:18.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-04-09T11:31:43.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "thocoo/gamma-desk",
"max_issues_repo_path": "gdesk/dialogs/main.py",
"max_line_length": 122,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9cb63a65fe23e30e155b3beca862f369b7fa1b7e",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "thocoo/gamma-desk",
"max_stars_repo_path": "gdesk/dialogs/main.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3740,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 17473
} |
import numpy as np
import matplotlib.pyplot as plt

# Plot solver scores versus constellation size and save to test.pdf.
max_score = 644   # maximum achievable score (number of tests)
sat_bound = 60    # constellation size at the SAT bound
x_limit = 70      # x-axis upper limit

# scores1 is only used by the commented-out comparison plot below.
scores1 = np.loadtxt('../../sandbox/leapfrog/test7-npass.dat')
scores2 = np.loadtxt('test-0-npass.txt')
# Spot-check the scores around the SAT bound.
print(scores2[58])
print(scores2[59])
print(scores2[60])
plt.clf()
# plt.plot(np.arange(len(scores1)) + 1, scores1, 'o--b', label='QC Ware Heuristic #1')
plt.plot(np.arange(len(scores2)) + 1, scores2, 'o--b', label='QC Ware Solution')
# Use the named constants instead of repeating the magic number 644.
plt.plot([0, x_limit], [max_score, max_score], '-k', label='Max Possible Score')
plt.plot([sat_bound, sat_bound], [0, max_score], '--k', label='SAT Bound')
plt.legend(loc=3)
plt.axis([0, x_limit, 0, max_score + 6])
plt.xlabel('Number of Test Cars in Constellation')
plt.ylabel('Score: Number of Tests Covered by Constellation')
plt.grid(True)
plt.savefig('test.pdf')
| {
"alphanum_fraction": 0.684137931,
"author": null,
"avg_line_length": 30.2083333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "91ae1f6cdf292196829564d96719c7323c6e7587",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "761e405587bffe5dc4ca9f79432a79df2c7fd8f8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "qcware/bmw",
"max_forks_repo_path": "prod-1/2-prod/plot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "761e405587bffe5dc4ca9f79432a79df2c7fd8f8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "qcware/bmw",
"max_issues_repo_path": "prod-1/2-prod/plot.py",
"max_line_length": 86,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "761e405587bffe5dc4ca9f79432a79df2c7fd8f8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "qcware/bmw",
"max_stars_repo_path": "prod-1/2-prod/plot.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 223,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 725
} |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Raimondas Zemblys
@email: r.zemblys@tf.su.lt
"""
#%% imports
import os, sys, copy
from distutils.dir_util import mkpath
from tqdm import tqdm
import numpy as np
import pandas as pd
#import seaborn as sns
#sns.set_style("ticks")
###
import argparse, multiprocessing, json, fnmatch
from datetime import datetime
from sklearn.externals import joblib
from util_lib.etdata import ETData
from util_lib.utils import split_path
from util_lib.irf import extractFeatures, get_i2mc
from util_lib.irf import postProcess
from util_lib.irf import hpp
#%%
def get_arguments():
    '''Parse command line options for the Random Forest event detector.

    Returns an argparse.Namespace with clf, root, dataset, ext,
    output_dir, workers and save_csv.
    '''
    ap = argparse.ArgumentParser(description='Eye-movement event detection '
                                 'using Random Forest.')
    # NOTE(review): 'clf' is positional, so its default only takes effect
    # with nargs='?'; kept unchanged to preserve the original CLI.
    ap.add_argument('clf', type=str,
                    default='irf_2018-03-26_20-46-41',
                    help='Classifier')
    ap.add_argument('root', type=str,
                    help='The path containing eye-movement data')
    ap.add_argument('dataset', type=str,
                    help='The directory containing experiment data')
    ap.add_argument('--ext', type=str, default='npy',
                    help='File type')
    ap.add_argument('--output_dir', type=str, default=None,
                    help='The directory to save output')
    ap.add_argument('--workers', type=int, default=4,
                    help='Number of workers to use')
    ap.add_argument('--save_csv', action='store_true',
                    help='Save output as csv file')
    return ap.parse_args()
#%% Setup parameters and variables
# NOTE(review): this file uses the Python 2 print statement below, so it is
# a Python 2 script.
args = get_arguments()
# Output root defaults to "<root>/<dataset>_irf" next to the input data.
ROOT_OUTPUT = args.output_dir if not(args.output_dir is None)\
              else '%s/%s_irf' % (args.root, args.dataset)
# Timestamped log file collecting files that yielded no features.
fpath_log = '%s/irf_%s.log'%(ROOT_OUTPUT, datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
db_path = '%s/%s'%(args.root, args.dataset)
n_avail_cores = multiprocessing.cpu_count()
n_jobs = args.workers if not(args.workers is None) else n_avail_cores
etdata = ETData()
pp = hpp()
#%% Main
#load classifier
if args.clf is None:
    print 'Classifier not provided'
    sys.exit()
else:
    print ('Loading model...')
    # Model pickle holds (feature-name list, fitted classifier).
    ft, clf = joblib.load('models/%s/model.pkl'%(args.clf))
    clf.set_params(n_jobs=n_jobs, verbose=0)
    print ('...done')
#load config
with open('config.json', 'r') as f:
    config = json.load(f)
with open('%s/db_config.json'%db_path, 'r') as f:
    db_config = json.load(f)
# Geometry (screen/eye setup) comes from the dataset, not the global config.
config['geom'] = db_config['geom']
#get file list and process data
FILES = []
for _root, _dir, _files in os.walk(db_path):
    FILES.extend(['%s/%s' % (_root, _file)
                 for _file in fnmatch.filter(_files, '*.%s'%args.ext)])
for fpath in tqdm(FILES):
    fdir, fname = split_path(fpath)
    # Mirror the input directory structure under ROOT_OUTPUT.
    odir = fdir.replace(db_path, ROOT_OUTPUT)
    mkpath(odir)
    etdata.load(fpath)
#    evt_gt = copy.deepcopy(etdata.data['evt']) #gound truth events
    #extract features
    if 'i2mc' in ft:
        # I2MC features are read from pre-computed .mat files; skip the file
        # entirely when they are missing.
        fdir_i2mc = odir.replace(ROOT_OUTPUT, '%s/i2mc'%ROOT_OUTPUT)
        fpath_i2mc = '%s/%s_i2mc.mat'%(fdir_i2mc, fname)
        i2mc = get_i2mc(etdata, fpath_i2mc, config['geom'])
        if i2mc is None:
            continue
        else:
            config['extr_kwargs']['i2mc'] = i2mc
    irf_features, pred_mask = extractFeatures(etdata, **config['extr_kwargs'])
    # Log and skip files that produced no features at all.
    if not(len(irf_features)):
        with open(fpath_log, 'a') as f:
            f.write('EMPTY:\t%s\n'%fpath.replace(ROOT_OUTPUT, ''))
        continue
    #select required features, transform to matrix and predict
    # View the structured array as a plain float32 matrix (one column per
    # named feature) for sklearn.
    X = irf_features[ft]
    X = X.view((np.float32, len(X.dtype.names)))
    pred = clf.predict_proba(X)
    #probabilistic post-processing
    etdata.data['evt'], etdata.data['status'], pred_ = \
    postProcess(etdata, pred, pred_mask, **config['pp_kwargs'])
    #hard post-processing
    etdata.data['evt'], etdata.data['status'], pp_rez, pp_inds = \
    pp.run_pp(etdata, **config['pp_kwargs'])
#    pp_check.run_pp(etdata, **config['pp_kwargs'])
    #save
    spath = '%s/%s'%(odir, fname)
    etdata.save(spath)
    #save csv
    if args.save_csv:
        data_df = pd.DataFrame(etdata.data)
        data_df.to_csv('%s.csv'%spath)
| {
"alphanum_fraction": 0.633233187,
"author": null,
"avg_line_length": 30.9071428571,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "93c28480be6e0143a208d0e8540ed63839f64124",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2021-12-13T16:54:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-05-26T08:21:25.000Z",
"max_forks_repo_head_hexsha": "65f12eae6e5a6878b75a869a85ce36f0fc9117ea",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "r-zemblys/irf",
"max_forks_repo_path": "run_irf.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "65f12eae6e5a6878b75a869a85ce36f0fc9117ea",
"max_issues_repo_issues_event_max_datetime": "2021-05-06T16:34:30.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-07-12T20:23:16.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "r-zemblys/irf",
"max_issues_repo_path": "run_irf.py",
"max_line_length": 87,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "65f12eae6e5a6878b75a869a85ce36f0fc9117ea",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "r-zemblys/irf",
"max_stars_repo_path": "run_irf.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-13T16:54:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-03-05T13:25:57.000Z",
"num_tokens": 1117,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4327
} |
"""
.. module:: GreenButtonDatasetAdapter
:platform: Unix
:synopsis: Contains methods for importing data following the green button
spec.
.. moduleauthor:: Phil Ngo <ngo.phil@gmail.com>
.. moduleauthor:: Miguel Perez <miguel@invalid.com>
.. moduleauthor:: Stephen Suffian <stephen.suffian@gmail.com>
.. moduleauthor:: Sabina Tomkins <sabina.tomkins@gmail.com>
"""
from appliance import ApplianceTrace
from appliance import ApplianceInstance
from appliance import ApplianceSet
from appliance import ApplianceType
import utils
import sqlalchemy
import pandas as pd
import numpy as np
from xml.parsers.expat import ExpatError
from xml.dom import minidom
from lxml import etree
from datetime import datetime
import warnings
import os.path
import re
def get_trace(xml_string):
    '''
    Returns an ApplianceTrace representing the data in the XML file, which
    must conform to the GreenButtonXML format.

    Raises InvalidXMLError if the string cannot be parsed as XML. (The
    previous version only printed the parse error and fell through, after
    which the undefined `values` variable raised a confusing NameError.)
    '''
    try:
        xmldoc = minidom.parseString(xml_string)
        values = xmldoc.getElementsByTagName('value')
        datetimes = xmldoc.getElementsByTagName('start')
        # TODO - more intelligently handle assumption about duration -> freq
        # duration[0] is the whole interval block; [1] is the reading period.
        frequency = int(xmldoc.getElementsByTagName('duration')[1]
                        .childNodes[0].nodeValue)
        # remove first extra 'start' time
        datetimes.pop(0)
    except ExpatError:
        # Fail loudly with the module's dedicated exception instead of
        # continuing with undefined variables.
        raise InvalidXMLError
    # extract reading values and their (epoch-second) timestamps
    values = [v.childNodes[0].nodeValue for v in values]
    datetimes = [datetime.fromtimestamp(int(dt.childNodes[0].nodeValue))
                for dt in datetimes]
    series = pd.Series(values,index=datetimes)
    metadata = {'source': 'GreenButtonXML'}
    trace = ApplianceTrace(series,metadata)
    # TODO - be more flexible
    # set sample rate; other durations keep the native resolution
    if frequency == 60 * 60:
        trace = trace.resample('H')
    elif frequency == 60 * 30:
        trace = trace.resample('30T')
    elif frequency == 60 * 15:
        trace = trace.resample('15T')
    elif frequency == 60:
        trace = trace.resample('T')
    return trace
def get_zipcode(xml_string):
    '''
    Extracts a US zipcode from the <title> of the first <entry> element of
    a GreenButtonXML document. Falls back to hard-coded Chicago zipcodes
    (with a warning) when the XML cannot be parsed or no zipcode is found.
    '''
    try:
        doc = minidom.parseString(xml_string)
        first_entry = doc.getElementsByTagName('entry')[0]
        title_node = first_entry.getElementsByTagName('title')[0]
        address = title_node.childNodes[0].nodeValue
    except ExpatError:
        warnings.warn("No zipcode found (ExpatError), using 60604")
        return "60604"
    # A 5-digit group (optionally ZIP+4) at the end of the address string.
    matches = re.findall(r"\s((\d{5})([-\s]\d{4})?)\s*$", address)
    if matches:
        return matches[0][1]
    warnings.warn("No zipcode found (IndexError), using 60605")
    return "60605"
def _validate(xml_string):
    '''
    Validates that the XML is in proper GB format
    '''
    # TODO - WARNING this does not actually validate anything right now!!!
    # Actually it would if you uncommented the etree.fromstring call, but it
    # is too strict
    # Schema lives at <package root>/assets/schemas/espiDerived.xsd.
    schema_file = os.path.abspath(os.path.join(os.path.dirname(
        os.path.dirname(__file__)), 'assets','schemas','espiDerived.xsd'))
    with open(schema_file, 'r') as f:
        schema_root = etree.XML(f.read())
    schema = etree.XMLSchema(schema_root)
    xmlparser = etree.XMLParser(schema=schema)
    try:
        # TODO make the validation work
        # etree.fromstring(xml_string, xmlparser)
        # NOTE(review): while the parse call stays commented out this always
        # returns True, and the bare except below is unreachable; narrow it
        # (e.g. to etree.XMLSyntaxError) when re-enabling validation.
        return True
    except:
        return False
class InvalidXMLError(Exception):
    """
    Exception raised for errors in the xml format of the data.

    Takes no arguments; the user-facing message lives entirely in __str__.
    """
    def __init__(self):
        # No state to initialise.
        pass
    def __str__(self):
        return '''Improperly formed GreenButton XML file. Please make sure the
        file follows the green button xsd specification. '''
| {
"alphanum_fraction": 0.6667519182,
"author": null,
"avg_line_length": 30.0769230769,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "41e1dc8144a83bffba6d80379c934f5d6a01278c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 17,
"max_forks_repo_forks_event_max_datetime": "2020-06-15T14:13:04.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-01T18:12:04.000Z",
"max_forks_repo_head_hexsha": "ac3a13780bccb001c81d6f8ee27d3f5706cfa77e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "afcarl/wikienergy",
"max_forks_repo_path": "disaggregator/GreenButtonDatasetAdapter.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ac3a13780bccb001c81d6f8ee27d3f5706cfa77e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "afcarl/wikienergy",
"max_issues_repo_path": "disaggregator/GreenButtonDatasetAdapter.py",
"max_line_length": 80,
"max_stars_count": 29,
"max_stars_repo_head_hexsha": "ac3a13780bccb001c81d6f8ee27d3f5706cfa77e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pjkundert/wikienergy",
"max_stars_repo_path": "disaggregator/GreenButtonDatasetAdapter.py",
"max_stars_repo_stars_event_max_datetime": "2021-04-20T08:25:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-08T19:20:37.000Z",
"num_tokens": 915,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3910
} |
from ctypes import *
import numpy as np
from SB import SB,SBTraining
from NB import NB,NBTraining
import config
import GSM as gsm
from convCode import convCode
from interleave import interleave
def c2cf(x):
    """Pack a complex numpy vector into an interleaved c_float array.

    Element i of *x* occupies slots 2i (real part) and 2i+1 (imaginary
    part) of the returned ctypes buffer.
    """
    n = len(x)
    buf = (c_float * (2 * n))()
    buf[0::2] = x.real[:]   # even slots: real parts
    buf[1::2] = x.imag[:]   # odd slots: imaginary parts
    return buf
def cf2c(output):
    """Unpack an interleaved (re, im, re, im, ...) float buffer into a
    complex numpy array — the inverse of c2cf."""
    reals = output[0::2]
    imags = output[1::2]
    return np.array([complex(r, i) for r, i in zip(reals, imags)])
def compress_bits(sbuf):
    """Pack a list of bits into a list of byte values.

    Bits are little-endian within each byte: sbuf[0] is the LSB of the
    first output byte. A trailing partial byte is packed the same way.

    Fix: the original stepped ``range(0, len(sbuf)-7, 8)`` and then handled
    the tail with the loop variable ``i``, which raised NameError for inputs
    shorter than 8 bits (the loop never ran). Iterating in plain 8-bit
    chunks handles full bytes, the tail, and short/empty inputs uniformly.
    """
    dbuf = []
    for i in range(0, len(sbuf), 8):
        c = 0
        k = 1
        for x in sbuf[i:i + 8]:
            c += k * x
            k *= 2
        dbuf.append(c)
    return dbuf
def buf2uint64(buf):
    """Interpret a little-endian byte sequence as an unsigned integer.

    buf[0] is the least significant byte. Returns 0 for an empty buffer.

    Fix: replaced the Python-2-only ``long`` with ``int`` — identical
    behaviour on Python 2 (ints auto-promote) and valid on Python 3.
    """
    r = 0
    for x in buf[::-1]:
        r <<= 8
        r += int(x)
    return r
"""
typedef struct burst_s {
int bl;
float osr;
short *recv;
gr_complex frame[1500];
gr_complex chn[3*10];
gr_complex rh[3];
int cut_pos;
float chpower;
gr_complex mafi[148];
int demoduled[148];
int msg[148];
int stolen[2];
} burst_t;
"""
class cburst(Structure):
    # Python mirror of the C `burst_t` struct documented in the string above;
    # field order and sizes must match the C layout exactly, so complex
    # arrays are declared as float arrays of twice the length.
    _fields_ = [
          ("bl"         , c_int)
        , ("osr"        , c_float)
        , ("recv"       , c_void_p)
        , ("frame"      , c_float*3000)
        , ("chn"        , c_float*120)
        , ("rh"         , c_float*6)
        , ("cut_pos"    , c_int)
        , ("chpower"    , c_float)
        , ("mafi"       , c_float*296)
        , ("demodulated", c_int*148)
        , ("msg"        , c_int*148)
        , ("stolen"     , c_int*2)
        ]
    def __init__(self):
        # Oversampling ratio: hardware sample rate over the GSM symbol rate.
        self.osr = float(config.SampleRate/gsm.SymbolRate)
    def mmap(self,r):
        # Point the C-side `recv` pointer at the caller's ctypes buffer.
        # NOTE: the caller must keep `r` alive for the lifetime of this burst.
        self.recv = addressof(r)
"""
typedef struct sch_s {
burst_t *sb;
unsigned char in_buf[78];
unsigned char outbuf[35];
uint64_t out[2];
} sch_t;
"""
class cSch(Structure):
    # Python mirror of the C `sch_t` struct (SCH decode workspace) documented
    # in the string above; field order must match the C layout.
    _fields_ = [
          ("sb"     ,c_void_p)
        , ("in_buf" ,c_char*78)
        , ("outbuf" ,c_char*35)
        , ("out"    ,c_uint64*2)
        ]
    def __init__(self,b):
        # Link to the synchronization burst this decode operates on.
        # NOTE: the caller must keep `b` alive while this struct is in use.
        self.sb = addressof(b)
"""
typedef struct cch_s {
burst_t *nb[4];
unsigned char in_buf[1024];
unsigned char outbuf[1024];
uint64_t out[4];
} cch_t;
"""
class cCch(Structure):
    # Python mirror of the C `cch_t` struct (control-channel decode over four
    # normal bursts) documented in the string above.
    _fields_ = [
          ("nb"     ,c_void_p*4)
        , ("in_buf" ,c_char*1024)
        , ("outbuf" ,c_char*1024)
        , ("out"    ,c_uint64*4)
        ]
    def __init__(self,bs):
        # Link the (up to four) normal bursts that make up one CCH block.
        # NOTE: the caller must keep the bursts in `bs` alive during decode.
        for i in range(len(bs)):
            self.nb[i] = addressof(bs[i])
class Trainings(Structure):
    # Pre-modulated training sequences handed to the C library, plus the
    # channel-estimation window bounds for SB and NB bursts. Complex samples
    # are stored interleaved, hence the float arrays of twice the length.
    _fields_ = [
          ("sb", c_float*128)
        , ("nb", c_float*52*len(NBTraining.bits))
        , ("sb_chn_s", c_int)
        , ("sb_chn_e", c_int)
        , ("nb_chn_s", c_int)
        , ("nb_chn_e", c_int)
        ]
    def __init__(self):
        # `cut` offsets the channel window into the frame buffer; the window
        # is 60 floats (30 complex taps) wide. Presumably tied to the burst
        # layout in burst_t.frame — TODO confirm against the C side.
        cut = 145
        self.sb[:] = c2cf(SBTraining.modulated)[:]
        for i in range(len(NBTraining.bits)):
            self.nb[i][:] = c2cf(NBTraining.modulated[i,:])[:]
        self.sb_chn_s = SB._chn_s+cut
        self.sb_chn_e = SB._chn_s+cut+60
        self.nb_chn_s = NB._chn_s+cut
        self.nb_chn_e = NB._chn_s+cut+60
"""
typedef struct CC_s {
uint64_t pp;
uint64_t pr;
int bs;
int ps;
int ts;
int maxE;
} CC_t;
"""
class ConvCodeHandle(Structure):
    # Python mirror of the C `CC_t` struct (convolutional-code parameters)
    # documented in the string above, extended with `ins` and the
    # interleaving table `ilT`.
    _fields_ = [
          ("pp"  , c_uint64)
        , ("pr"  , c_uint64)
        , ("bs"  , c_int)
        , ("ps"  , c_int)
        , ("ts"  , c_int)
        , ("ins" , c_int)
        , ("maxE", c_int)
        , ("ilT" , c_int*(57*8))
        ]
    def __init__(self,config):
        # Parity polynomial/remainder are bit lists packed little-endian
        # into 64-bit integers for the C decoder.
        self.pp = c_uint64(buf2uint64(compress_bits(config['parity_polynomial'])))
        self.pr = c_uint64(buf2uint64(compress_bits(config['parity_remainder'])))
        self.bs = c_int(config['DATA_BLOCK_SIZE'])
        self.ps = c_int(config['PARITY_SIZE'])
        self.ts = c_int(config['TAIL_BITS_SIZE'])
        # Total input size: data + parity + tail bits.
        self.ins = self.bs+self.ps+self.ts
        self.maxE = self.ins*2+1
        # 456-bit GSM interleaving table (57*8 positions, stride 57*2).
        il = interleave(57*8,57*2)
        self.ilT[:]=il.trans[:]
class clib:
    """Thin Python wrapper around the native GSM DSP library.

    Shared state (training sequences and the SCH/CCH convolutional-code
    handles) is built once at class-definition time and passed by reference
    into every C call.
    """
    trainings = Trainings()
    sch_dec = ConvCodeHandle(convCode.sch_config)
    cch_dec = ConvCodeHandle(convCode.cch_config)
    def __init__(self,lib):
        # `lib` is the loaded ctypes shared library exposing the C entry
        # points used below.
        self.lib = lib
    def newBurst(self,r):
        """Wrap raw short-sample buffer `r` in a cburst (2 shorts/sample)."""
        b = cburst()
        b.bl = len(r)/2
        b.mmap(r)
        return b
    def demodu(self,b,t):
        """Demodulate burst `b` with training index `t` (C side mutates b)."""
        self.lib.demodu(byref(b),byref(clib.trainings),c_int(t))
    def doSch(self,b):
        """Decode a synchronization burst; returns (C status code, cSch)."""
        aSch = cSch(b)
        return self.lib.doSch(byref(aSch),byref(clib.trainings),byref(clib.sch_dec),0),aSch
    def doCch(self,b,t):
        """Decode a CCH block from bursts `b`; returns (C status code, cCch)."""
        aCch = cCch(b)
        return self.lib.doCch(byref(aCch),byref(clib.trainings),byref(clib.cch_dec),c_int(t+1)),aCch
    def cch_deinterleave(self,b):
        """De-interleave a CCH block in place; result kept on self.aCch."""
        self.aCch = cCch(b)
        self.lib.cch_deinterleave(byref(self.aCch),byref(clib.cch_dec))
    def viterbi_detector(self,mafi,rhh,bs):
        """Run the C Viterbi detector on matched-filter output `mafi`.

        The burst and channel taps are normalised by the centre tap
        rhh[2].real before the call; returns the detected bit list.
        """
        filtered_burst = (c_float*(bs*2))()
        stop_states = (c_int*2)()
        stop_states[0]=1
        stop_states[1]=2
        output =(c_int*bs)()
        hard =(c_int*bs)()
        filtered_burst[::2]=mafi[:bs].real/rhh[2].real
        filtered_burst[1::2]=mafi[:bs].imag/rhh[2].real
        # Three complex taps rhh[1:4], interleaved and normalised.
        xrhh =(c_float*6)()
        xrhh[::2]=rhh[1:4].real/rhh[2].real
        xrhh[1::2]=rhh[1:4].imag/rhh[2].real
        self.lib.viterbi_detector(
              byref(filtered_burst)
            , 148
            , byref(xrhh)
            , c_int(2)
            , byref(stop_states)
            , 2
            , byref(output)
            )
        return output[:]
    def viterbi_restore(self,x,rhh,bs):
        """Re-modulate bit sequence `x` through channel taps rhh[1:4];
        returns the reconstructed complex samples."""
        output = (c_float*(bs*2))()
        in_put =(c_int*bs)()
        in_put[:] = x[:bs]
        xrhh =(c_float*6)()
        xrhh[::2]=rhh[1:4].real
        xrhh[1::2]=rhh[1:4].imag
        self.lib.viterbi_restore(
              byref(in_put)
            , 148
            , byref(xrhh)
            , c_int(1)
            , c_int(0)
            , byref(output)
            )
        return np.array([complex(output[i],output[i+1]) for i in range(0,len(output),2)])
    def matchFilter(self,d,h,osr,timing):
        """Correlate signal `d` with filter `h` at oversampling ratio `osr`
        and fractional timing offset; returns complex filter output."""
        cd = c2cf(d)
        ch = c2cf(h)
        l = int((len(d)-len(h)-1)/osr)
        cout = (c_float*(l*2))()
        self.lib.matchFilter(
              byref(cd), byref(ch)
            , c_int(l), c_int(len(h))
            , byref(cout)
            , c_float(osr), c_float(timing))
        return cf2c(cout)
    def maxwin(self,b,l):
        """Return the start index of the length-`l` window of `b` with the
        largest energy, as computed by the C library."""
        cb = c2cf(b)
        p = self.lib.maxwin(byref(cb),c_int(len(b)),c_int(l))
        return p
    def channelEst(self,frame,training,osr):
        """Estimate the channel response by correlating `frame` against the
        known `training` sequence; returns complex correlation values."""
        cf = c2cf(frame)
        ct = c2cf(training)
        l = int(len(frame)-(len(training)-1)*osr)
        cout = (c_float*(l*2))()
        self.lib.channelEst(
              byref(cf)
            , byref(ct)
            , c_int(len(frame))
            , c_int(len(training))
            , c_float(osr)
            , c_int(l)
            , byref(cout)
            )
        return cf2c(cout)
| {
"alphanum_fraction": 0.6095110516,
"author": null,
"avg_line_length": 22.3670411985,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "648361d0418d45abe51afa9c24ca4ea1bcb31295",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2020-11-28T08:52:37.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-05-09T04:18:29.000Z",
"max_forks_repo_head_hexsha": "bd4e7aeff7f8dcf874324ba458620f411101633e",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "RP7/R7-OCM",
"max_forks_repo_path": "src/host/python/gsmlib/clib.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bd4e7aeff7f8dcf874324ba458620f411101633e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "RP7/R7-OCM",
"max_issues_repo_path": "src/host/python/gsmlib/clib.py",
"max_line_length": 94,
"max_stars_count": 9,
"max_stars_repo_head_hexsha": "bd4e7aeff7f8dcf874324ba458620f411101633e",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "RP7/R7-OCM",
"max_stars_repo_path": "src/host/python/gsmlib/clib.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-26T02:00:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-05-09T04:18:16.000Z",
"num_tokens": 2267,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5972
} |
/*=============================================================================
Copyright (c) 2001-2009 Hartmut Kaiser
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#if !defined(BOOST_SPIRIT_MATCH_MANIP_MAY_05_2007_1203PM)
#define BOOST_SPIRIT_MATCH_MANIP_MAY_05_2007_1203PM
#if defined(_MSC_VER)
#pragma once
#endif
#include <boost/spirit/home/qi/parse.hpp>
#include <boost/spirit/home/support/unused.hpp>
#include <boost/mpl/bool.hpp>
#include <iterator>
#include <string>
///////////////////////////////////////////////////////////////////////////////
namespace boost { namespace spirit { namespace qi { namespace detail
{
    ///////////////////////////////////////////////////////////////////////////
    // Stream manipulator holding a parser expression, skipper, attribute
    // reference and post-skip flag; operator>> below drives the actual parse.
    // The primary template stores the attribute by reference.
    template <typename Expr, typename Copy = mpl::false_
      , typename Skipper = unused_type, typename Attribute = unused_type const>
    struct match_manip
    {
        match_manip(Expr const& xpr, Skipper const& s, Attribute& a)
          : expr(xpr), skipper(s), attr(a), post_skip(skip_flag::postskip) {}

        match_manip(Expr const& xpr, Skipper const& s
            , BOOST_SCOPED_ENUM(skip_flag) ps, Attribute& a)
          : expr(xpr), skipper(s), attr(a), post_skip(ps) {}

        Expr const& expr;
        Skipper const& skipper;
        Attribute& attr;
        BOOST_SCOPED_ENUM(skip_flag) const post_skip;

    private:
        // silence MSVC warning C4512: assignment operator could not be generated
        match_manip& operator= (match_manip const&);
    };

    // Copy == mpl::true_ specialization: stores the attribute BY VALUE
    // (note `Attribute attr;` below) instead of by reference.
    template <typename Expr, typename Skipper, typename Attribute>
    struct match_manip<Expr, mpl::true_, Skipper, Attribute>
    {
        match_manip(Expr const& xpr, Skipper const& s, Attribute& a)
          : expr(xpr), skipper(s), attr(a), post_skip(skip_flag::postskip) {}

        match_manip(Expr const& xpr, Skipper const& s
            , BOOST_SCOPED_ENUM(skip_flag) ps, Attribute& a)
          : expr(xpr), skipper(s), attr(a), post_skip(ps) {}

        Expr const& expr;
        Skipper const& skipper;
        Attribute attr;
        BOOST_SCOPED_ENUM(skip_flag) const post_skip;

    private:
        // silence MSVC warning C4512: assignment operator could not be generated
        match_manip& operator= (match_manip const&);
    };

    ///////////////////////////////////////////////////////////////////////////
    // No skipper, no attribute: plain qi::parse over istream_iterators.
    // On failure the stream's failbit is set, matching iostream conventions.
    template<typename Char, typename Traits, typename Expr, typename Copy>
    inline std::basic_istream<Char, Traits> &
    operator>>(std::basic_istream<Char, Traits> &is,
        match_manip<Expr, Copy> const& fm)
    {
        typedef std::istream_iterator<Char, Char, Traits> input_iterator;
        input_iterator f(is);
        input_iterator l;
        if (!qi::parse(f, l, fm.expr))
        {
            is.setstate(std::ios_base::failbit);
        }
        return is;
    }

    ///////////////////////////////////////////////////////////////////////////
    // Attribute but no skipper: qi::parse with the attribute out-parameter.
    template<typename Char, typename Traits, typename Expr, typename Copy
      , typename Attribute>
    inline std::basic_istream<Char, Traits> &
    operator>>(std::basic_istream<Char, Traits> &is,
        match_manip<Expr, Copy, unused_type, Attribute> const& fm)
    {
        typedef std::istream_iterator<Char, Char, Traits> input_iterator;
        input_iterator f(is);
        input_iterator l;
        if (!qi::parse(f, l, fm.expr, fm.attr))
        {
            is.setstate(std::ios_base::failbit);
        }
        return is;
    }

    ///////////////////////////////////////////////////////////////////////////
    // Skipper but no attribute: qi::phrase_parse honouring post_skip.
    template<typename Char, typename Traits, typename Expr, typename Copy
      , typename Skipper>
    inline std::basic_istream<Char, Traits> &
    operator>>(std::basic_istream<Char, Traits> &is,
        match_manip<Expr, Copy, Skipper> const& fm)
    {
        typedef std::istream_iterator<Char, Char, Traits> input_iterator;
        input_iterator f(is);
        input_iterator l;
        if (!qi::phrase_parse(
                f, l, fm.expr, fm.skipper, fm.post_skip))
        {
            is.setstate(std::ios_base::failbit);
        }
        return is;
    }

    ///////////////////////////////////////////////////////////////////////////
    // Skipper and attribute: qi::phrase_parse with attribute out-parameter.
    // NOTE(review): the template parameters here are named Attribute/Skipper
    // in the opposite order to match_manip's declaration; deduction still
    // works since both are unconstrained, but the naming is misleading —
    // presumably historical, confirm against upstream Boost before renaming.
    template<typename Char, typename Traits, typename Expr, typename Copy
      , typename Attribute, typename Skipper
    >
    inline std::basic_istream<Char, Traits> &
    operator>>(
        std::basic_istream<Char, Traits> &is,
        match_manip<Expr, Copy, Attribute, Skipper> const& fm)
    {
        typedef std::istream_iterator<Char, Char, Traits> input_iterator;
        input_iterator f(is);
        input_iterator l;
        if (!qi::phrase_parse(
                f, l, fm.expr, fm.skipper, fm.post_skip, fm.attr))
        {
            is.setstate(std::ios_base::failbit);
        }
        return is;
    }

}}}}
#endif
| {
"alphanum_fraction": 0.5556443556,
"author": null,
"avg_line_length": 35.75,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "6a56c462c9642ae35d04d64078d38a3a7443f072",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e8730d1ef90395e3d7ed3ad00264702313b0766a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "EricBoittier/vina-carb-docker",
"max_forks_repo_path": "src/lib/boost/spirit/home/qi/stream/detail/match_manip.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e8730d1ef90395e3d7ed3ad00264702313b0766a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "EricBoittier/vina-carb-docker",
"max_issues_repo_path": "src/lib/boost/spirit/home/qi/stream/detail/match_manip.hpp",
"max_line_length": 81,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e8730d1ef90395e3d7ed3ad00264702313b0766a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "EricBoittier/vina-carb-docker",
"max_stars_repo_path": "src/lib/boost/spirit/home/qi/stream/detail/match_manip.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1120,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5005
} |
import re
import numpy
import tempfile
import random
from fractions import Fraction
from .data import *
from .circularity import *
def lispify(lisp, obj):
    """Convert *obj* to its Lisp textual representation, first replacing
    circular references using *lisp*'s readtable (see circularity module)."""
    return lispify_datum(decircularize(obj, lisp.readtable))
def lispify_datum(obj):
    """Render a single (already decircularized) datum as Lisp text.

    Dispatches on the exact type via the module-level `lispifiers` table;
    LispWrapper handles are printed as #<handle>? references. Raises
    RuntimeError for unsupported types.
    """
    handler = lispifiers.get(type(obj))
    if handler is not None:
        return handler(obj)
    if isinstance(obj, LispWrapper):
        return "#{}?".format(obj.handle)
    raise RuntimeError("Cannot lispify {}.".format(obj))
def lispify_ndarray(A):
    """Render a numpy array as Lisp text.

    Arrays without Python-object elements are serialized via a temp .npy
    file; object arrays are printed recursively as a #nA(...) literal.
    """
    if not A.dtype.hasobject:
        return lispify_specialized_ndarray(A)
    def rec(A):
        # Leaf element (no ndim attribute, or ndim == 0 falsy): print as datum.
        # NOTE(review): a 0-d object *array* reaches the first branch and is
        # fed back into lispify_datum, which dispatches here again —
        # presumably never hit in practice; confirm before relying on it.
        if not getattr(A, 'ndim'):
            return lispify_datum(A)
        if A.ndim == 0:
            return " " + lispify_datum(A.item())
        else:
            return "(" + " ".join(rec(a) for a in A) + ")"
    return "#{}A".format(A.ndim) + rec(A)
def lispify_specialized_ndarray(A):
    """Serialize a non-object ndarray by saving it to a randomly named
    .npy file in the temp directory; the Lisp side reads it back via the
    #N"path" reader macro."""
    token = random.randrange(2**63-1)
    path = tempfile.gettempdir() + '/cl4py-array-{}.npy'.format(token)
    numpy.save(path, A)
    return '#N"{}"'.format(path)
def lispify_dict(d):
    """Render a Python dict as a Lisp hash-table literal: {k1 v1 k2 v2 }."""
    parts = ["{"]
    for key, value in d.items():
        parts.append(lispify_datum(key) + " " + lispify_datum(value) + " ")
    parts.append("}")
    return "".join(parts)
def lispify_str(s):
    """Return *s* as a double-quoted Lisp string literal, escaping
    backslashes and embedded double quotes."""
    escaped = s.translate(str.maketrans({'"': '\\"', '\\': '\\\\'}))
    return '"' + escaped + '"'
def lispify_tuple(x):
    """Only the empty tuple is lispifiable directly (as NIL).

    Non-empty tuples should never reach this point because decircularize
    converts them to cl4py Lists beforehand.
    """
    if x:
        raise RuntimeError('Cannot lispify non-empty tuple.')
    return "NIL"
def lispify_Cons(x):
    """Render a cons chain as a Lisp list, with a dotted tail when the
    final cdr is not NIL."""
    items = []
    node = x
    while isinstance(node, Cons):
        items.append(lispify_datum(node.car) + " ")
        node = node.cdr
    body = "".join(items)
    if not null(node):
        body += " . " + lispify_datum(node)
    return "(" + body + ")"
def lispify_Symbol(x):
    """Print a symbol with pipe escapes, package-qualified when the symbol
    carries a package (|PKG|::|NAME|), bare (|NAME|) otherwise."""
    name = "|" + x.name + "|"
    if not x.package:
        return name
    return "|" + x.package + "|::" + name
def lispify_Complex(x):
    """Render as the Common Lisp complex literal #C(realpart imagpart)."""
    real_text = lispify_datum(x.real)
    imag_text = lispify_datum(x.imag)
    return "#C(" + real_text + " " + imag_text + ")"
def lispify_float16(x):
    """Half float -> Lisp short-float literal (exponent marker S)."""
    return f'{x:E}'.replace('E', 'S')

def lispify_float32(x):
    """Single float -> Lisp literal (default exponent marker E)."""
    return f'{x:E}'

def lispify_float64(x):
    """Double float -> Lisp double-float literal (exponent marker D)."""
    return f'{x:E}'.replace('E', 'D')

def lispify_float128(x):
    """Quad float -> Lisp long-float literal (exponent marker L)."""
    return f'{x:E}'.replace('E', 'L')
# Dispatch table used by lispify_datum: maps exact Python types to the
# function producing their Lisp textual form. Lookup is by type(obj), so
# subclasses are NOT matched.
# NOTE(review): numpy.float128 does not exist on every platform (e.g.
# Windows builds of numpy) — importing this module would fail there; confirm
# the supported platforms before relying on it.
lispifiers = {
    # Built-in objects.
    bool       : lambda x: "T" if x else "NIL",
    type(None) : lambda x: "NIL",
    int        : str,
    float      : lispify_float64,
    complex    : lispify_Complex,
    list       : lambda x: "#(" + " ".join(lispify_datum(elt) for elt in x) + ")",
    Fraction   : str,
    tuple      : lispify_tuple,
    str        : lispify_str,
    dict       : lispify_dict,
    # cl4py objects.
    Cons       : lispify_Cons,
    Symbol     : lispify_Symbol,
    Keyword    : lispify_Symbol,
    SharpsignEquals    : lambda x: "#" + str(x.label) + "=" + lispify_datum(x.obj),
    SharpsignSharpsign : lambda x: "#" + str(x.label) + "#",
    # Numpy objects.
    numpy.ndarray    : lispify_ndarray,
    numpy.str_       : lispify_str,
    numpy.int8       : str,
    numpy.int16      : str,
    numpy.int32      : str,
    numpy.int64      : str,
    numpy.uint8      : str,
    numpy.uint16     : str,
    numpy.uint32     : str,
    numpy.uint64     : str,
    numpy.float16    : lispify_float16,
    numpy.float32    : lispify_float32,
    numpy.float64    : lispify_float64,
    numpy.float128   : lispify_float128,
    numpy.complex64  : lispify_Complex,
    numpy.complex128 : lispify_Complex,
}
| {
"alphanum_fraction": 0.5769549599,
"author": null,
"avg_line_length": 25.85,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0e933539264a25ee0cec405beb9156201b1999bc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bd40f9aff807e6ad88afc572e5a93e939052f878",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "scymtym/cl4py",
"max_forks_repo_path": "cl4py/writer.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bd40f9aff807e6ad88afc572e5a93e939052f878",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "scymtym/cl4py",
"max_issues_repo_path": "cl4py/writer.py",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bd40f9aff807e6ad88afc572e5a93e939052f878",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "scymtym/cl4py",
"max_stars_repo_path": "cl4py/writer.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1058,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3619
} |
# Loops and functions
# ===============
# (the bare "===============" underline was a syntax error in R; kept as a comment)

# 1) print the numbers 1..1000
for (i in 1:1000) print (i)
total=0
# 2) sum the numbers 1..1000
for (i in 1:1000) {
  total=total+i
}
total
# 3) a function that halves its argument
divideByTwo <- function(x) return(x/2)
divideByTwo(10)
# 4) a function that always returns 99 (ignores its argument)
ninetynine <- function(x) return (99)
# 5) a function that adds its two arguments
addThem <- function(x,y) return(x+y)
#One take on how to scrape Wikipedia pageviews
getData <- function(url){
  #function to download data in json format
  # Returns a data frame with columns `rd.views` (daily view count) and
  # `date`. NOTE: the column name `rd.views` comes from the variable name
  # below and is relied upon by visualiseStats — do not rename.
  require(rjson)
  raw.data <- readLines(url, warn="F")
  rd <- fromJSON(raw.data)
  rd.views <- rd$daily_views
  rd.views <- unlist(rd.views)
  rd <- as.data.frame(rd.views)
  # Dates arrive as row names; move them into a proper column.
  rd$date <- rownames(rd)
  rownames(rd) <- NULL
  return(rd)
}
getUrls <- function(y1,y2,term){
  # Build the stats.grok.se JSON url for every month from year y1 to y2
  # for one article title. Months are zero-padded to two digits, matching
  # the site's YYYYMM path segment.
  urls <- NULL
  for (year in y1:y2){
    for (month in 1:12){
      urls <- c(urls, sprintf("http://stats.grok.se/json/en/%d%02d/%s", year, month, term))
    }
  }
  return(urls)
}
getStats <- function(y1,y2,terms){
  # Download the monthly pageview data for each term between years y1 and
  # y2 and stack everything into one long data frame, tagging each row
  # with its term in a `term` column.
  output <- NULL
  for (term in terms){
    results <- NULL
    for (url in getUrls(y1,y2,term)){
      print(url)  # progress indicator: one line per month fetched
      results <- rbind(results,getData(url))
    }
    results$term <- term
    output <- rbind(output,results)
  }
  return(output)
}
visualiseStats <- function(input){
  #Plot daily views over time, one coloured line per term.
  require(lubridate)
  require(ggplot2)
  input$date <- as.Date(input$date)
  ggplot(input, aes(date, rd.views, colour = term)) + geom_line()
}
#Fetch 2011-2012 monthly view counts for two articles (network access) and
#plot them.
input <- getStats(2011,2012,c("Data_mining","Web_scraping"))
visualiseStats(input)
| {
"alphanum_fraction": 0.6292517007,
"author": null,
"avg_line_length": 21,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "1ffed37f673b27f291abb39f2e0c1c9c62522c2e",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "147b03e784ae58a8669b0b4f3a23727b0b5cb9e2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "idcrook/SR_Foundations_DS_Fall_2015",
"max_forks_repo_path": "part4_data_wrangling/data_scraping/SRMC_lectures_in_Web_Scraping_2014/2014/fetchdata2.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "147b03e784ae58a8669b0b4f3a23727b0b5cb9e2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "idcrook/SR_Foundations_DS_Fall_2015",
"max_issues_repo_path": "part4_data_wrangling/data_scraping/SRMC_lectures_in_Web_Scraping_2014/2014/fetchdata2.r",
"max_line_length": 92,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "147b03e784ae58a8669b0b4f3a23727b0b5cb9e2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "idcrook/SR_Foundations_DS_Fall_2015",
"max_stars_repo_path": "part4_data_wrangling/data_scraping/SRMC_lectures_in_Web_Scraping_2014/2014/fetchdata2.r",
"max_stars_repo_stars_event_max_datetime": "2021-02-12T16:24:36.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-12T16:24:36.000Z",
"num_tokens": 529,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1764
} |
import numpy as np
import matplotlib.pyplot as plt
from simplestat import statinf
# Load the pre-trained weight matrices and two evaluation sets from disk.
# allow_pickle is required because "model" is stored as an object array of
# per-layer matrices. Judging by the plot labels below, "t" presumably holds
# samples of the target class ("7") and "at" the complement -- confirm
# against the script that wrote model.npz.
f=np.load("model.npz",allow_pickle=True)
model=f["model"]
t=f["t"]
at=f["at"]
print(*[m.shape for m in model])
print(t.shape)
# exit()
def runmodel(inp, layers=None):
    """Run a batch of inputs through a stack of dense+ReLU layers.

    Parameters
    ----------
    inp : np.ndarray
        Batch of input vectors, shape ``(n_samples, n_features)``.
    layers : sequence of np.ndarray, optional
        Weight matrices applied in order. Defaults to the module-level
        ``model`` loaded from ``model.npz`` (the original hard-coded that
        global; it is now a parameter so the function is reusable/testable).

    Returns
    -------
    np.ndarray
        Mean over the last axis of the final activations: one score per
        input sample.
    """
    if layers is None:
        layers = model  # fall back to the globally loaded weights
    x = inp
    for w in layers:
        # Dense layer followed by a ReLU activation.
        x = np.dot(x, w)
        x = np.maximum(x, 0)
    return np.mean(x, axis=-1)
# Score both evaluation sets with the loaded network.
print("doing 7")
to=runmodel(t)
print("doing ~7")
ato=runmodel(at)
print("done")
# Summary statistics via the project's simplestat helper.
print(statinf(to))
print(statinf(ato))
# exit()
# Overlay the two score distributions to see how well they separate.
plt.hist(to,bins=50,alpha=0.5,label="7")
plt.hist(ato,bins=50,alpha=0.5,label="~7")
plt.legend()
plt.show()
| {
"alphanum_fraction": 0.6632302405,
"author": null,
"avg_line_length": 14.9230769231,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5801aca18c107455d116336ef9e0d9f8fdd14de7",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "86afc22718aded00bbc05e4582fa0a9b6aa3ab25",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "psorus/anogen",
"max_forks_repo_path": "not_so_weird/oneoff/test1.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "86afc22718aded00bbc05e4582fa0a9b6aa3ab25",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "psorus/anogen",
"max_issues_repo_path": "not_so_weird/oneoff/test1.py",
"max_line_length": 42,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "86afc22718aded00bbc05e4582fa0a9b6aa3ab25",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "psorus/anogen",
"max_stars_repo_path": "not_so_weird/oneoff/test1.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 188,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 582
} |
# # 2D linear diffusion Julia MPI solver
using Plots, Printf, MAT
import MPI
# enable plotting by default
# `do_save` may already be bound by a driver script; only default it to true
# when it is not yet defined.
if !@isdefined do_save; do_save = true end
# MPI functions
"""
    update_halo(A, neighbors_x, neighbors_y, comm)

Exchange the one-cell-wide halo rows/columns of the local array `A` with the
four Cartesian neighbours. For each existing neighbour, the adjacent interior
row/column is sent and the received data overwrites the boundary row/column.
The tag pairs (0/1 in x, 2/3 in y) match each send with the corresponding
receive on the neighbour. NOTE(review): blocking `MPI.Send` before `MPI.Recv!`
relies on MPI buffering the (small) halo message; confirm halo sizes stay
small enough to avoid deadlock.
"""
@views function update_halo(A, neighbors_x, neighbors_y, comm)
    # Send to / receive from neighbor 1 in dimension x ("left neighbor")
    if neighbors_x[1] != MPI.MPI_PROC_NULL
        sendbuf = A[2,:]
        recvbuf = zeros(size(A[1,:]))
        MPI.Send(sendbuf, neighbors_x[1], 0, comm)
        MPI.Recv!(recvbuf, neighbors_x[1], 1, comm)
        A[1,:] = recvbuf
    end
    # Send to / receive from neighbor 2 in dimension x ("right neighbor")
    if neighbors_x[2] != MPI.MPI_PROC_NULL
        sendbuf = A[end-1,:]
        recvbuf = zeros(size(A[end,:]))
        MPI.Send(sendbuf, neighbors_x[2], 1, comm)
        MPI.Recv!(recvbuf, neighbors_x[2], 0, comm)
        A[end,:] = recvbuf
    end
    # Send to / receive from neighbor 1 in dimension y ("bottom neighbor")
    if neighbors_y[1] != MPI.MPI_PROC_NULL
        sendbuf = A[:,2]
        recvbuf = zeros(size(A[:,1]))
        MPI.Send(sendbuf, neighbors_y[1], 2, comm)
        MPI.Recv!(recvbuf, neighbors_y[1], 3, comm)
        A[:,1] = recvbuf
    end
    # Send to / receive from neighbor 2 in dimension y ("top neighbor")
    if neighbors_y[2] != MPI.MPI_PROC_NULL
        sendbuf = A[:,end-1]
        recvbuf = zeros(size(A[:,end]))
        MPI.Send(sendbuf, neighbors_y[2], 3, comm)
        MPI.Recv!(recvbuf, neighbors_y[2], 2, comm)
        A[:,end] = recvbuf
    end
    return
end
"""
    diffusion_2D_mpi(; do_save=false)

Explicit 2D linear diffusion solver distributed over an MPI Cartesian grid.
Each rank owns a local (nx, ny) block whose one-cell halos are exchanged via
`update_halo` after every step. Rank 0 prints timing and an effective memory
throughput estimate; with `do_save=true` each rank writes its block to
`H_<rank>.mat`.
"""
@views function diffusion_2D_mpi(; do_save=false)
    # MPI: build a Cartesian communicator and locate this rank's neighbours.
    MPI.Init()
    dims = [0,0]
    comm = MPI.COMM_WORLD
    nprocs = MPI.Comm_size(comm)
    MPI.Dims_create!(nprocs, dims)
    comm_cart = MPI.Cart_create(comm, dims, [0,0], 1)
    me = MPI.Comm_rank(comm_cart)
    coords = MPI.Cart_coords(comm_cart)
    neighbors_x = MPI.Cart_shift(comm_cart, 0, 1)
    neighbors_y = MPI.Cart_shift(comm_cart, 1, 1)
    if (me==0) println("nprocs=$(nprocs), dims[1]=$(dims[1]), dims[2]=$(dims[2])") end
    # Physics
    lx, ly = 10.0, 10.0  # domain extent
    λ = 1.0              # diffusion coefficient
    nt = 100             # number of time steps
    # Numerics
    nx, ny = 32, 32 # local number of grid points
    nx_g, ny_g = dims[1]*(nx-2)+2, dims[2]*(ny-2)+2 # global number of grid points
    # Derived numerics
    dx, dy = lx/nx_g, ly/ny_g # global
    dt = min(dx,dy)^2/λ/4.1   # explicit-stability-limited time step
    # Array allocation (flux arrays)
    qHx = zeros(nx-1,ny-2)
    qHy = zeros(nx-2,ny-1)
    # Initial condition: Gaussian bump centred in the global domain,
    # evaluated at this rank's global coordinates.
    x0, y0 = coords[1]*(nx-2)*dx, coords[2]*(ny-2)*dy
    xc = [x0 + ix*dx - dx/2 - 0.5*lx for ix=1:nx]
    yc = [y0 + iy*dy - dy/2 - 0.5*ly for iy=1:ny]
    H = exp.(.-xc.^2 .-yc'.^2)
    t_tic = 0.0
    # Time loop
    for it = 1:nt
        # Start timing at iteration 11 to exclude warm-up/compilation.
        if (it==11) t_tic = Base.time() end
        qHx .= .-λ*diff(H[:,2:end-1], dims=1)/dx
        qHy .= .-λ*diff(H[2:end-1,:], dims=2)/dy
        H[2:end-1,2:end-1] .= H[2:end-1,2:end-1] .- dt*(diff(qHx, dims=1)/dx .+ diff(qHy, dims=2)/dy)
        update_halo(H, neighbors_x, neighbors_y, comm_cart)
    end
    t_toc = (Base.time()-t_tic)
    # T_eff: factor 2 presumably accounts for one read + one write of H per
    # step -- confirm against the course notes.
    if (me==0) @printf("Time = %1.4e s, T_eff = %1.2f GB/s \n", t_toc, round((2/1e9*nx*ny*sizeof(lx))/(t_toc/(nt-10)), sigdigits=2)) end
    # Save to visualise
    if do_save file = matopen("$(@__DIR__)/H_$(me).mat", "w"); write(file, "H", Array(H)); close(file) end
    MPI.Finalize()
    return
end
# Run the solver; `do_save` defaults to true unless predefined by the caller.
diffusion_2D_mpi(; do_save=do_save)
| {
"alphanum_fraction": 0.5594763802,
"author": null,
"avg_line_length": 37.3829787234,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "7dd3995f80798db7c5fd87b9a47fe8153747118b",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2021-12-07T00:32:14.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-02T12:57:06.000Z",
"max_forks_repo_head_hexsha": "a25865c1d14df81a882fe116413bbbd24ddebdb4",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "omlins/julia-gpu-course",
"max_forks_repo_path": "solutions/diffusion_2D_mpi.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a25865c1d14df81a882fe116413bbbd24ddebdb4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "omlins/julia-gpu-course",
"max_issues_repo_path": "solutions/diffusion_2D_mpi.jl",
"max_line_length": 136,
"max_stars_count": 44,
"max_stars_repo_head_hexsha": "a25865c1d14df81a882fe116413bbbd24ddebdb4",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "omlins/julia-gpu-course",
"max_stars_repo_path": "solutions/diffusion_2D_mpi.jl",
"max_stars_repo_stars_event_max_datetime": "2022-01-26T19:32:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-01T18:58:47.000Z",
"num_tokens": 1206,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3514
} |
#pragma once
#include <vector>
#include <cmath>
#include <boost/concept_check.hpp>
#include "endian.hpp"
#include "binary.hpp"
namespace hsl
{
namespace detail
{
// Round half away from zero.
// (From http://stackoverflow.com/questions/485525/round-for-float-in-c)
inline double sround(double r) {
    if (r > 0.0)
        return floor(r + 0.5);
    return ceil(r - 0.5);
}
/// Compile-time calculation size of array defined statically.
template <typename T, std::size_t N>
inline std::size_t static_array_size(T (&t)[N])
{
    (void)t; // parameter exists only for template argument deduction
    return N;
}
/// Simple RAII wrapper.
/// It's dedicated to use with types associated with custom deleter,
/// opaque pointers and C API objects.
template <typename T>
class raii_wrapper
{
    // Deleter invoked on the owned pointer at destruction / reset.
    typedef void(*deleter_type)(T* p);

public:
    // Takes ownership of p; both p and d must be non-null.
    raii_wrapper(T* p, deleter_type d)
        : p_(p), del_(d)
    {
        assert(0 != p_);
        assert(0 != del_);
    }

    // NOTE(review): this copy-assignment copies the raw pointer without
    // releasing the current one, so the old pointee leaks and the shared
    // pointee is deleted twice when both wrappers are destroyed. Together
    // with the private copy constructor (and the commented-out private
    // operator= below) it looks like copying was meant to be disabled
    // entirely -- confirm intended use before relying on assignment.
    raii_wrapper& operator=(raii_wrapper const& rhs)
    {
        if (&rhs != this)
        {
            p_ = rhs.p_;
            del_ = rhs.del_;
        }
        return *this;
    }

    ~raii_wrapper()
    {
        do_delete(p_);
    }

    // Delete the current pointee and take ownership of p.
    void reset(T* p)
    {
        do_delete(p_);
        p_= p;
    }

    T* get() const
    {
        return p_;
    }

    // NOTE(review): only the pointers are swapped, not the deleters; if the
    // two wrappers carry different deleters, each pointer ends up paired
    // with the wrong one.
    void swap(raii_wrapper& other)
    {
        std::swap(p_, other.p_);
    }

private:
    raii_wrapper(raii_wrapper const& other); // non-copyable (declared only)
    // raii_wrapper& operator=(raii_wrapper const& rhs);

    void do_delete(T* p)
    {
        assert(del_);
        if (0 != p)
            del_(p);
    }

    T* p_;
    deleter_type del_;
};
// Forward declaration: approximate floating-point equality (defined below).
template <typename T>
bool compare_distance(const T& actual, const T& expected);

/// Minimal 3-D point with epsilon-based component-wise equality.
template <typename T>
struct Point
{
    // Value-initialises all coordinates (zero for arithmetic T).
    Point()
        : x(T()), y(T()), z(T())
    {}

    Point(T const& x, T const& y, T const& z)
        : x(x), y(y), z(z)
    {}

    // True when every coordinate matches within machine epsilon
    // (see compare_distance).
    bool equal(Point<T> const& other) const
    {
        return (compare_distance(x, other.x)
                && compare_distance(y, other.y)
                && compare_distance(z, other.z));
    }

    T x;
    T y;
    T z;
};
// Equality / inequality expressed via Point::equal.
template <typename T>
bool operator==(Point<T> const& lhs, Point<T> const& rhs)
{
    return lhs.equal(rhs);
}

template <typename T>
bool operator!=(Point<T> const& lhs, Point<T> const& rhs)
{
    return !(lhs == rhs);
}
// True when |actual - expected| is within machine epsilon for T.
// (NaN inputs compare unequal, as with the usual ordering operators.)
template <typename T>
bool compare_distance(const T& actual, const T& expected)
{
    const T eps = std::numeric_limits<T>::epsilon();
    const T diff = actual - expected;
    return diff <= eps && diff >= -eps;
}
// View an object (or buffer pointer) as a writable char buffer.
template<typename T>
inline char* as_buffer(T& data)
{
    return reinterpret_cast<char*>(&data);
}

template<typename T>
inline char* as_buffer(T* data)
{
    return reinterpret_cast<char*>(data);
}
// View an object (or buffer pointer) as a read-only char buffer.
template<typename T>
inline char const* as_bytes(T const& data)
{
    return reinterpret_cast<char const*>(&data);
}

template<typename T>
inline char const* as_bytes(T const* data)
{
    return reinterpret_cast<char const*>(data);
}
// adapted from http://www.cplusplus.com/forum/beginner/3076/
// Decode a little-endian IntegerType from `data` starting at byte offset
// `index`; the result is stored in `output` and also returned.
// NOTE(review): relies on the project's binary::endian_value helper and does
// no bounds check -- assumes index + sizeof(IntegerType) <= data.size().
template <typename IntegerType>
inline IntegerType bitsToInt(IntegerType& output,
                             std::vector<uint8_t> const& data,
                             std::size_t index)
{
    binary::endian_value<IntegerType> value;
    value.template load<binary::little_endian_tag>(&data[0] + index);
    output = value;
    return output;
}
// Encode `input` as a little-endian IntegerType into `data` starting at byte
// offset `index` (inverse of bitsToInt).
// NOTE(review): no bounds check -- assumes index + sizeof(IntegerType)
// <= data.size().
template <typename IntegerType>
inline void intToBits(IntegerType input,
                      std::vector<uint8_t>& data,
                      std::size_t index)
{
    binary::endian_value<IntegerType> value(input);
    value.template store<binary::little_endian_tag>(&data[0] + index);
}
}
} | {
"alphanum_fraction": 0.5933556528,
"author": null,
"avg_line_length": 19.7106598985,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "57e1c877312792238cf2c7e9c1cd822d30f82515",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-12-11T13:29:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-12-11T13:29:21.000Z",
"max_forks_repo_head_hexsha": "3f1a203948118fc38a431f57337461da37d10ede",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "zjliucn/libHSL",
"max_forks_repo_path": "include/detail/private_utility.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3f1a203948118fc38a431f57337461da37d10ede",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "zjliucn/libHSL",
"max_issues_repo_path": "include/detail/private_utility.hpp",
"max_line_length": 70,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3f1a203948118fc38a431f57337461da37d10ede",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "zjliucn/libHSL",
"max_stars_repo_path": "include/detail/private_utility.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 995,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3883
} |
import os
import numpy as np
import rasterio
import typer
from rasterio.windows import from_bounds
from .stac import query_stac_api
from .utils import project_point, project_tif
# Root directory for intermediate and final artefacts; overridable via the
# `outputs_dir` environment variable (defaults to /tmp).
outputs_dir = os.getenv("outputs_dir", "/tmp")
class IMAGE:
    """Clip a STAC scene's image bands to a bounding box, reproject the clip
    to EPSG:4326, and optionally export a contrast-stretched 16-bit PNG."""

    def __init__(self, bounds):
        # `bounds` must expose .left/.bottom/.right/.top in EPSG:4326
        # (rasterio BoundingBox-like) -- TODO confirm with callers.
        typer.secho("Creating image object...", fg=typer.colors.MAGENTA)
        self.stac_item = query_stac_api(bounds)
        # Fixed band combination; presumably red/green/blue asset names of
        # the queried collection -- confirm against query_stac_api.
        self.algorithm = "B4,B3,B2"
        self.band_count = len(self.algorithm.split(","))
        self.tif_path = self.apply_algorithm(bounds)

    def apply_algorithm(self, bounds):
        """Read each band clipped to `bounds`, write a 3-band GeoTIFF in the
        source CRS, then reproject it to EPSG:4326. Returns the path of the
        reprojected GeoTIFF."""
        stac_item = self.stac_item
        algorithm = self.algorithm
        subsets = []
        for band_idx, band_name in enumerate(algorithm.split(",")):
            fp = stac_item.get("assets").get(band_name).get("href")
            with rasterio.open(fp) as src:
                src_crs = src.crs.to_epsg()
                # Project the EPSG:4326 bounding box into the source CRS so
                # the read window lines up with the raster grid.
                dem_proj_bl = project_point(
                    bounds.left,
                    bounds.bottom,
                    from_proj="epsg:4326",
                    to_proj=f"epsg:{src_crs}",
                )
                dem_proj_tr = project_point(
                    bounds.right,
                    bounds.top,
                    from_proj="epsg:4326",
                    to_proj=f"epsg:{src_crs}",
                )
                dem_proj_bounds = [
                    dem_proj_bl[0],
                    dem_proj_bl[1],
                    dem_proj_tr[0],
                    dem_proj_tr[1],
                ]
                wdw = from_bounds(*dem_proj_bounds, src.transform)
                subsets.append(src.read(1, window=wdw))
        # NOTE: band_idx and dem_proj_bounds intentionally leak out of the
        # loop here -- the clip geometry of the last band is assumed to match
        # all bands.
        clip_width = subsets[band_idx].shape[1]
        clip_height = subsets[band_idx].shape[0]
        clip_transform = rasterio.transform.from_bounds(
            *dem_proj_bounds, clip_width, clip_height
        )
        img_clip = os.path.join(outputs_dir, "intermediate", "img_orig.tif")
        with rasterio.open(
            img_clip,
            "w",
            driver="GTiff",
            height=clip_height,
            width=clip_width,
            count=3,
            dtype=subsets[0].dtype,
            crs=src_crs,
            transform=clip_transform,
        ) as dst:
            for band_idx in range(self.band_count):
                dst.write(np.squeeze(subsets[band_idx]), band_idx + 1)
        dst_crs = "EPSG:4326"
        img_4326 = os.path.join(outputs_dir, "intermediate", "img_4326.tif")
        project_tif(img_clip, img_4326, dst_crs)
        return img_4326

    def write_png(self):
        """Export the reprojected GeoTIFF as a 16-bit PNG, stretched so the
        5th-95th percentile of each clip maps onto the full uint16 range.
        Stores the PNG path and bounds on the instance."""
        with rasterio.open(self.tif_path) as src:
            self.png_bounds = src.bounds
            profile = src.profile
            profile["driver"] = "PNG"
            profile["dtype"] = "uint16"
            png_filename = os.path.join(outputs_dir, "intermediate", "img_4326.png")
            rasters = []
            for i in range(self.band_count):
                rasters.append(src.read(i + 1))
            rasters = np.stack(rasters)
        with rasterio.open(png_filename, "w", **profile) as dst:
            uint16 = rasters.astype(np.uint16)
            # Percentiles are taken per band (bands moved to the last axis).
            percs = np.percentile(np.moveaxis(uint16, 0, -1), [5, 95], axis=[0, 1])
            max_value = np.iinfo(np.uint16).max
            norm = (np.moveaxis(uint16, 0, -1) - percs[0]) / (percs[1] - percs[0])
            clipped = np.clip(norm, 0, 1)
            uint16_scaled = (clipped * max_value).astype(np.uint16)
            uint16_scaled = np.moveaxis(uint16_scaled, -1, 0)
            dst.write(uint16_scaled)
        typer.secho(f"...PNG written to: {png_filename}")
        self.png_path = png_filename
| {
"alphanum_fraction": 0.5376401495,
"author": null,
"avg_line_length": 35.6761904762,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d5879306714261a4e9da0f03316e67c0fb11f26e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a4708f97a548691649057ef6b58f01b7002418e0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "phloem7/cdemgl",
"max_forks_repo_path": "cdemgl/modules/image.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a4708f97a548691649057ef6b58f01b7002418e0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "phloem7/cdemgl",
"max_issues_repo_path": "cdemgl/modules/image.py",
"max_line_length": 87,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "a4708f97a548691649057ef6b58f01b7002418e0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "phloem7/cdemgl",
"max_stars_repo_path": "cdemgl/modules/image.py",
"max_stars_repo_stars_event_max_datetime": "2020-09-30T19:46:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-30T19:46:48.000Z",
"num_tokens": 870,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3746
} |
#############################################################################
# HYBRID SYSTEM SAMPLERS
#############################################################################
"""
This is adapted from Perses: https://github.com/choderalab/perses/
See here for the license: https://github.com/choderalab/perses/blob/main/LICENSE
"""
import copy
import warnings
import numpy as np
import openmm
from openmm import unit
from openmmtools.multistate import replicaexchange
from openmmtools import cache
from openmmtools.states import (CompoundThermodynamicState,
SamplerState, ThermodynamicState)
from .lambdaprotocol import RelativeAlchemicalState
class HybridCompatibilityMixin(object):
    """
    Mixin that allows the MultistateSampler to accommodate the situation where
    unsampled endpoints have a different number of degrees of freedom.
    """

    def __init__(self, *args, hybrid_factory=None, **kwargs):
        # NOTE(review): setup() reads self._factory (assigned by
        # HybridRepexSampler.__init__), not this attribute -- confirm which
        # of the two is canonical.
        self._hybrid_factory = hybrid_factory
        super(HybridCompatibilityMixin, self).__init__(*args, **kwargs)

    def setup(self, reporter, platform, lambda_protocol,
              temperature=298.15 * unit.kelvin, n_replicas=None,
              endstates=True):
        """
        Setup MultistateSampler based on the input lambda protocol and number
        of replicas.

        Parameters
        ----------
        reporter : OpenMM reporter
            Simulation reporter to attach to each simulation replica.
        platform : openmm.Platform
            Platform to perform simulation on.
        lambda_protocol : LambdaProtocol
            The lambda protocol to be used for simulation. Default to a default
            class creation of LambdaProtocol.
        temperature : openmm.Quantity
            Simulation temperature, default to 298.15 K
        n_replicas : int
            Number of HREX replicas to simulate. Sets to the number of lambda
            states (as defined by lambda_protocol) if ``None``.
            Default ``None``.
        endstates : bool
            Whether or not to generate unsampled endstates (i.e. dispersion
            correction).

        Attributes
        ----------
        n_states : int
            Number of states / windows which are to be sampled. Obtained from
            lambda_protocol.
        """
        n_states = len(lambda_protocol.lambda_schedule)
        hybrid_system = self._factory.hybrid_system
        lambda_zero_state = RelativeAlchemicalState.from_system(hybrid_system)
        thermostate = ThermodynamicState(hybrid_system,
                                         temperature=temperature)
        compound_thermostate = CompoundThermodynamicState(
            thermostate,
            composable_states=[lambda_zero_state])

        # create lists for storing thermostates and sampler states
        thermodynamic_state_list = []
        sampler_state_list = []

        context_cache = cache.ContextCache(platform)

        if n_replicas is None:
            msg = (f"setting number of replicas to number of states: {n_states}")
            warnings.warn(msg)
            n_replicas = n_states
        elif n_replicas > n_states:
            wmsg = (f"More sampler states: {n_replicas} requested than the "
                    f"number of available states: {n_states}. Setting "
                    "the number of replicas to the number of states")
            warnings.warn(wmsg)
            n_replicas = n_states

        lambda_schedule = lambda_protocol.lambda_schedule
        if len(lambda_schedule) != n_states:
            errmsg = ("length of lambda_schedule must match the number of "
                      "states, n_states")
            raise ValueError(errmsg)

        # starting with the hybrid factory positions
        box = hybrid_system.getDefaultPeriodicBoxVectors()
        sampler_state = SamplerState(self._factory.hybrid_positions,
                                     box_vectors=box)

        # Loop over the lambdas and create & store a compound thermostate at
        # that lambda value
        for lambda_val in lambda_schedule:
            compound_thermostate_copy = copy.deepcopy(compound_thermostate)
            compound_thermostate_copy.set_alchemical_parameters(
                lambda_val, lambda_protocol)
            thermodynamic_state_list.append(compound_thermostate_copy)

            # Pre-create (and cache) a context for this state; the returned
            # context and integrator are not needed here.
            context_cache.get_context(compound_thermostate_copy)
            # TODO: move to our own MD tasks
            # feptasks.minimize(compound_thermodynamic_state_copy,
            #                   sampler_state)
            sampler_state_list.append(copy.deepcopy(sampler_state))

        # making sure number of sampler states equals n_replicas
        if len(sampler_state_list) != n_replicas:
            # picking roughly evenly spaced sampler states
            # if n_replicas == 1, then it will pick the first in the list
            # FIX: use the local (possibly clamped) n_replicas; the original
            # read self.n_replicas, which is not guaranteed to equal the
            # local value and would then trip the assertion below.
            samples = np.linspace(0, len(sampler_state_list) - 1,
                                  n_replicas)
            idx = np.round(samples).astype(int)
            sampler_state_list = [state for i, state in
                                  enumerate(sampler_state_list) if i in idx]

        assert len(sampler_state_list) == n_replicas

        if endstates:
            # generating unsampled endstates
            unsampled_dispersion_endstates = create_endstates(
                copy.deepcopy(thermodynamic_state_list[0]),
                copy.deepcopy(thermodynamic_state_list[-1]))
            self.create(
                thermodynamic_states=thermodynamic_state_list,
                sampler_states=sampler_state_list, storage=reporter,
                unsampled_thermodynamic_states=unsampled_dispersion_endstates)
        else:
            self.create(thermodynamic_states=thermodynamic_state_list,
                        sampler_states=sampler_state_list, storage=reporter)
class HybridRepexSampler(HybridCompatibilityMixin,
                         replicaexchange.ReplicaExchangeSampler):
    """
    ReplicaExchangeSampler that supports unsampled end states with a different
    number of positions
    """

    def __init__(self, *args, hybrid_factory=None, **kwargs):
        # Forward the factory to the mixin, then remember it locally as the
        # attribute that setup() actually consults.
        super().__init__(*args, hybrid_factory=hybrid_factory, **kwargs)
        self._factory = hybrid_factory
def create_endstates(first_thermostate, last_thermostate):
    """
    Utility function to generate unsampled endstates
    1. Move all alchemical atom LJ parameters from CustomNonbondedForce to
       NonbondedForce.
    2. Delete the CustomNonbondedForce.
    3. Set PME tolerance to 1e-5.
    4. Enable LJPME to handle long range dispersion corrections in a physically
       reasonable manner.

    Parameters
    ----------
    first_thermostate : openmmtools.states.CompoundThermodynamicState
        The first thermodynamic state for which an unsampled endstate will be
        created.
    last_thermostate : openmmtools.states.CompoundThermodynamicState
        The last thermodynamic state for which an unsampled endstate will be
        created.

    Returns
    -------
    unsampled_endstates : list of openmmtools.states.CompoundThermodynamicState
        The corrected unsampled endstates.
    """
    unsampled_endstates = []
    # master_lambda = 0 pairs with the first state, 1 with the last.
    for master_lambda, endstate in zip([0., 1.],
                                       [first_thermostate, last_thermostate]):
        dispersion_system = endstate.get_system()
        energy_unit = unit.kilocalories_per_mole
        # Find the NonbondedForce (there must be only one)
        forces = {force.__class__.__name__: force for
                  force in dispersion_system.getForces()}
        # Set NonbondedForce to use LJPME
        ljpme = openmm.NonbondedForce.LJPME
        forces['NonbondedForce'].setNonbondedMethod(ljpme)
        # Set tight PME tolerance
        TIGHT_PME_TOLERANCE = 1.0e-5
        forces['NonbondedForce'].setEwaldErrorTolerance(TIGHT_PME_TOLERANCE)
        # Move alchemical LJ sites from CustomNonbondedForce back to
        # NonbondedForce. A particle is "alchemical" here when its epsilon in
        # NonbondedForce is zero while either endpoint epsilon is non-zero;
        # its LJ parameters are then linearly interpolated at master_lambda.
        for particle_index in range(forces['NonbondedForce'].getNumParticles()):
            charge, sigma, epsilon = forces['NonbondedForce'].getParticleParameters(particle_index)
            sigmaA, epsilonA, sigmaB, epsilonB, unique_old, unique_new = forces['CustomNonbondedForce'].getParticleParameters(particle_index)
            if (epsilon/energy_unit == 0.0) and ((epsilonA > 0.0) or (epsilonB > 0.0)):
                sigma = (1-master_lambda)*sigmaA + master_lambda*sigmaB
                epsilon = (1-master_lambda)*epsilonA + master_lambda*epsilonB
                forces['NonbondedForce'].setParticleParameters(
                    particle_index, charge,
                    sigma, epsilon)
        # Delete the CustomNonbondedForce since we have moved all alchemical
        # particles out of it
        for force_index, force in enumerate(list(dispersion_system.getForces())):
            if force.__class__.__name__ == 'CustomNonbondedForce':
                custom_nonbonded_force_index = force_index
                break
        dispersion_system.removeForce(custom_nonbonded_force_index)
        # Set all parameters to master lambda
        for force_index, force in enumerate(list(dispersion_system.getForces())):
            if hasattr(force, 'getNumGlobalParameters'):
                for parameter_index in range(force.getNumGlobalParameters()):
                    if force.getGlobalParameterName(parameter_index)[0:7] == 'lambda_':
                        force.setGlobalParameterDefaultValue(parameter_index,
                                                             master_lambda)
        # Store the unsampled endstate
        unsampled_endstates.append(ThermodynamicState(
            dispersion_system, temperature=endstate.temperature))
    return unsampled_endstates
| {
"alphanum_fraction": 0.6339406862,
"author": null,
"avg_line_length": 44.6666666667,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e0ee79fc6690b6ffbde44b879a716030ee527caa",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2022-02-21T06:28:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-24T18:45:54.000Z",
"max_forks_repo_head_hexsha": "d4c78af62a7ae05b99eb95d173661ac134b7e7b9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mikemhenry/openfe",
"max_forks_repo_path": "openfe/setup/_rbfe_utils/multistate.py",
"max_issues_count": 109,
"max_issues_repo_head_hexsha": "d4c78af62a7ae05b99eb95d173661ac134b7e7b9",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T20:13:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-24T18:57:05.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mikemhenry/openfe",
"max_issues_repo_path": "openfe/setup/_rbfe_utils/multistate.py",
"max_line_length": 141,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "d4c78af62a7ae05b99eb95d173661ac134b7e7b9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mikemhenry/openfe",
"max_stars_repo_path": "openfe/setup/_rbfe_utils/multistate.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T04:58:35.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-24T22:01:19.000Z",
"num_tokens": 2088,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10318
} |
import numpy as np
import pandas as pd
import requests
from lxml import etree
from lxml import html
import os
import gc
# Source TSV listing all proteins, temp CSV of scraped image URLs, and the
# directory images are downloaded to.
HPA_TSV_FILE = 'input/proteinatlas.tsv'
TEMP_URLS_FILE = "new_data_urls.csv"
DOWNLOAD_IMAGES_DIR = "input/new_data"

# Ensure output directories exist. FIX: os.path.dirname() of a bare filename
# is "", and os.makedirs("") raises FileNotFoundError, so only create the
# URLs directory when the path actually has one; exist_ok makes the
# check-then-create race-free.
_urls_dir = os.path.dirname(TEMP_URLS_FILE)
if _urls_dir:
    os.makedirs(_urls_dir, exist_ok=True)
os.makedirs(DOWNLOAD_IMAGES_DIR, exist_ok=True)

# FIX: use the constant instead of repeating the literal path (they pointed
# at the same file; keeping them in sync manually is error-prone). The unused
# genes/urls/labels accumulators were removed -- nothing in this script reads
# them.
protein_atlas = pd.read_csv(HPA_TSV_FILE, sep='\t')
# Scrape every gene's XML page and write one CSV row per subcellular-assay
# image: Ensembl id, image path, and the ";"-joined location labels.
with open(TEMP_URLS_FILE, 'w') as csv_file:
    for i, row in protein_atlas.iterrows():
        res = requests.get("https://www.proteinatlas.org/{0}.xml".format(row["Ensembl"]))
        tree = etree.fromstring(res.content)
        for data in tree.xpath('//cellExpression/subAssay/data'):
            locations = data.xpath('./location/text()')
            for im_url in data.xpath('./assayImage/image/imageUrl/text()'):
                # im_url[35:] strips a fixed-length URL prefix (presumably
                # host + "/images/") -- NOTE(review): breaks if the site
                # changes its URL layout.
                csv_file.write('{0},{1},{2}\n'.format(row["Ensembl"],
                                                      im_url[35:],
                                                      ";".join(locations)))
# Scrape each gene's /cell page. Table rows carry either image thumbnails
# (img[@base]) starting a new group, or additional location headings for the
# current group; when a new group starts, the previous group's images are
# flushed with every heading collected so far.
image_names = []
parsed_labels = []
for i, row in protein_atlas.iterrows():
    resp = requests.get("https://www.proteinatlas.org/{Ensembl}-{Gene}/cell".format(**row))
    tree = html.fromstring(resp.text)
    images = []
    classes = []
    for tr in tree.xpath('//th[contains(@class, "cellImages")]/table[2]/tbody/tr'):
        bases = tr.xpath('.//img[@base]/@base')
        klass = tr.xpath('.//td[not(@rowspan)][1]/text()')[0]
        if bases:
            # New image group: emit the previous one, then start collecting.
            for img in images:
                image_names.append(img)
                parsed_labels.append(";".join(classes))
            images = [b for b in bases]
            classes.clear()
        classes.append(klass.lower())
    # Flush the final group of the page.
    for img in images:
        image_names.append(img)
        parsed_labels.append(";".join(classes))
new_data_labels = pd.DataFrame()
new_data_labels['Image'] = image_names
new_data_labels['Site Parsed Labels'] = parsed_labels
# Map Human Protein Atlas location names to the 28 HPA-competition class ids.
# The "BONUS" entries fold extra site labels onto existing classes (Midbody
# variants -> Cytokinetic bridge, Nucleus -> Nucleoplasm); commented-out
# entries have no usable class and are dropped.
labels_dict = {
    "Nucleoplasm": 0,
    "Nuclear membrane": 1,
    "Nucleoli": 2,
    "Nucleoli fibrillar center": 3,
    "Nuclear speckles": 4,
    "Nuclear bodies": 5,
    "Endoplasmic reticulum": 6,
    "Golgi apparatus": 7,
    "Peroxisomes": 8,
    "Endosomes": 9,
    "Lysosomes": 10,
    "Intermediate filaments": 11,
    "Actin filaments": 12,
    "Focal adhesion sites": 13,
    "Microtubules": 14,
    "Microtubule ends": 15,
    "Cytokinetic bridge": 16,
    "Mitotic spindle": 17,
    "Microtubule organizing center": 18,
    "Centrosome": 19,
    "Lipid droplets": 20,
    "Plasma membrane": 21,
    "Cell junctions": 22,
    "Mitochondria": 23,
    "Aggresome": 24,
    "Cytosol": 25,
    "Cytoplasmic bodies": 26,
    "Rods & rings": 27,
    # BONUS!
    'Midbody': 16,
    # 'Cleavage furrow':-2,
    'Nucleus': 0,
    # 'Vesicles':-4,
    'Midbody ring': 16
}
# Lower-cased view of the mapping, matching the lower-cased labels scraped
# from the site.
labels_dict_lower = dict((k.lower(), i) for k, i in labels_dict.items())
# Join the XML-derived labels onto the site-parsed labels by image path.
new_data = pd.read_csv(TEMP_URLS_FILE, header=None, names=["Gene", "Url", "XML Labels"])
new_data.drop_duplicates(["Url"], inplace=True)
new_data.index = new_data["Url"].apply(lambda x: "/images/" + x.replace("_blue_red_green.jpg", ""))
new_data.drop("Url", axis=1, inplace=True)
new_data_labels.drop_duplicates(inplace=True)
all_new_data = new_data_labels.join(new_data, on="Image")
all_new_data.fillna("", inplace=True)

# Convert each ";"-separated label list into a space-separated string of
# class ids, sorted descending. FIX: membership must be tested on the
# lower-cased label -- the original checked `l in labels_dict_lower` while
# looking up `labels_dict_lower[l.lower()]`, silently dropping any label that
# was not already lower case (the dict keys are all lower case).
for c in ['Site Parsed Labels', "XML Labels"]:
    all_new_data[c] = all_new_data[c].apply(lambda x: " ".join(map(str, sorted(
        set([labels_dict_lower[l.lower()] for l in x.split(';') if l.lower() in labels_dict_lower]),
        reverse=True))))

# Keep rows that got at least one recognised label from either source.
all_new_data = all_new_data.loc[~(all_new_data['Site Parsed Labels'] == "") | ~(all_new_data['XML Labels'] == "")]
all_new_data["img id"] = all_new_data["Image"].apply(lambda x: x.split("/")[-1])

# Reconcile with images already on disk: delete ones that no longer have
# labels, and rename survivors so the current class list is embedded in the
# filename.
already_downloaded = pd.DataFrame(os.listdir(DOWNLOAD_IMAGES_DIR), columns=["Path"])
already_downloaded["img id"] = already_downloaded["Path"].apply(lambda x: x[:x.find("_classes")])
for file in already_downloaded.loc[~already_downloaded["img id"].isin(all_new_data["img id"])]["Path"]:
    os.remove(os.path.join(DOWNLOAD_IMAGES_DIR, file))
temp = all_new_data.set_index("img id")
for i, row in already_downloaded.loc[already_downloaded["img id"].isin(all_new_data["img id"])].iterrows():
    new_name = "{0}_classes_{1}.jpg".format(row["img id"],
                                            temp.loc[row["img id"], "Site Parsed Labels"].replace(" ", "_"))
    os.rename(os.path.join(DOWNLOAD_IMAGES_DIR, row["Path"]), os.path.join(DOWNLOAD_IMAGES_DIR, new_name))
del temp

# Download everything not already present, encoding the labels in the name.
to_download = all_new_data.loc[~all_new_data["img id"].isin(already_downloaded["img id"])]
for i, row in to_download.iterrows():
    url = "https://www.proteinatlas.org{0}_blue_red_green.jpg".format(row['Image'])
    filename = os.path.join(DOWNLOAD_IMAGES_DIR,
                            "{0}_classes_{1}.jpg".format(row["img id"], row["Site Parsed Labels"].replace(" ", "_")))
    try:
        with open(filename, "wb") as f:
            f.write(requests.get(url).content)
    except Exception as e:
        # Best-effort download: report and continue with the rest.
        print("ERROR?...", e)
| {
"alphanum_fraction": 0.6318818339,
"author": null,
"avg_line_length": 34.7533333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "027d76895837749ca44253137d709ff58b8f7d9e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "547d53aaca148fdb5f4585526ad7364dfa47967d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "guitarmind/HPA-competition-solutions",
"max_forks_repo_path": "one_more_layer_of_stacking/src/external_data.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "547d53aaca148fdb5f4585526ad7364dfa47967d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "guitarmind/HPA-competition-solutions",
"max_issues_repo_path": "one_more_layer_of_stacking/src/external_data.py",
"max_line_length": 117,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "547d53aaca148fdb5f4585526ad7364dfa47967d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "guitarmind/HPA-competition-solutions",
"max_stars_repo_path": "one_more_layer_of_stacking/src/external_data.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1411,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5213
} |
import json
import numpy as np
import os
import re
from pprint import pprint
# Load the NSC question/answer annotations once at import time.
# Each record carries the id of the article its answer lives in;
# article_ids[i] pairs with answers_detail[i] throughout this script.
answers_detail = None
with open('./data/nsc_questions_answers.json', encoding='utf-8-sig') as f:
    answers_detail = (json.load(f))['data']
article_ids = []
for ans in answers_detail:
    article_ids.append(ans['article_id'])
def get_vocab_wvs(wv_path, preprocessed_doc=None, vocabs=None):
    """Load word vectors for a restricted vocabulary from a fastText .vec file.

    :param wv_path: Path to a fastText text-format vector file (first line is
        the "<n_words> <dim>" header, which is skipped).
    :param preprocessed_doc: Optional token list used to derive ``vocabs`` when
        ``vocabs`` is not given (blank tokens are dropped).
    :param vocabs: Set of tokens to look up; scanning stops early once all of
        them have been found.
    :return: dict mapping token -> list of vector components (as strings).
    """
    white_spaces = ['', ' ']
    if not vocabs and preprocessed_doc:
        vocabs = set(tk for tk in preprocessed_doc if tk not in white_spaces)
    vocab_wvs = {}
    line_count = 0
    vocab_count = 0
    # Context manager fixes the original resource leak: the file handle was
    # opened but never closed.
    with open(wv_path, encoding='utf-8-sig') as fasttext_fp:
        for line in fasttext_fp:
            if line_count > 0:  # skip the fastText "<n_words> <dim>" header line
                line = line.split()
                if vocab_count < len(vocabs):
                    if line[0] in vocabs:
                        vocab_wvs[line[0]] = line[1:]
                        print('found %s %s total_len: %s' % (line_count, line[0], len(vocabs)))
                        vocab_count += 1
                        print(vocab_count)
                else:
                    # Every requested vocab entry found — stop scanning early.
                    break
            line_count += 1
    return vocab_wvs
def preprocess_document(document):
    """Strip markup/punctuation noise from a list of tokens.

    Tokens that fully match the acronym pattern (e.g. Thai abbreviations such
    as "พ.ศ." or plain alphanumerics) are kept untouched; every other token has
    special characters, wiki-dump artifacts ("doc id=", "url=", raw URLs) and
    residual bracket/dash characters removed.

    :param document: iterable of string tokens.
    :return: list of cleaned tokens (same length/order as input).
    """
    # Compiled once here instead of on every loop iteration (the patterns are
    # loop-invariant; re-compiling per token was pure overhead).
    sp_url_pattern = re.compile(r"[\"#$%&\'()*+,-/:;<=>?@[\\\]^_`{\|}~“”!]|doc id=\"|url=|^https:(.*)|^wikipedia.org(.*)|\\u(.*)")
    doc_pattern = re.compile(r"doc(.|[\n]*)")
    acronym_pattern = re.compile(r"(([a-zA-Z\u0e00-\u0ef70-9]+[.])*[a-zA-Z\u0e00-\u0ef70-9]*)")  # พ.ศ. ดร.
    preprocess_doc = []
    for tk in document:
        if re.fullmatch(acronym_pattern, tk):
            preprocessed_tk = tk
        else:
            preprocessed_tk = re.sub(sp_url_pattern, '', tk)
            preprocessed_tk = re.sub(doc_pattern, '', preprocessed_tk)
            preprocessed_tk = ''.join(c for c in preprocessed_tk if not(c in ['(', ')', '–', '_', ',', '-', ';', '{', '}', ' ']))
        preprocess_doc.append(preprocessed_tk)
    return preprocess_doc
def track_answer(answer_detail, document):
    """Mark which tokens contain the answer span's boundary characters.

    :param answer_detail: dict with 1-based character positions under keys
        'answer_begin_position ' (note the trailing space — that is the actual
        key in the source data) and 'answer_end_position'.
    :param document: list of string tokens; concatenated they form the text
        the positions refer to.
    :return: (answer_masks, tokens_range) where answer_masks[i] is 1 when
        token i contains the begin or end character of the answer, and
        tokens_range[i] is that token's (start, end) character offsets.
    """
    answer_masks = []
    tokens_range = []
    # Hoisted out of the loop: the answer boundaries are loop-invariant.
    ans_begin = answer_detail['answer_begin_position ']
    ans_end = answer_detail['answer_end_position']
    start = 0
    for tk in document:
        end = start + len(tk)
        tokens_range.append((start, end))
        # 1-based positions -> 0-based offsets; token holds a boundary char
        # when either offset falls inside [start, end).
        if start <= ans_begin - 1 < end or start <= ans_end - 1 < end:
            answer_masks.append(1)
        else:
            answer_masks.append(0)
        start = end
    return answer_masks, tokens_range
def vectorize_tokens(sentence, vocab_wvs=None, wvl=300):
    """Embed a token list as a (len(sentence), wvl) float matrix.

    Rows for '<PAD>' tokens and for tokens that cannot be embedded stay zero.

    :param sentence: list of string tokens.
    :param vocab_wvs: dict mapping token -> vector (sequence of wvl numbers).
    :param wvl: word-vector length (fastText cc.th.300 uses 300).
    :return: numpy array of shape (len(sentence), wvl).
    """
    word_vectors = np.zeros((len(sentence), wvl))
    for i, token in enumerate(sentence):
        if token == '<PAD>':
            continue  # padding rows stay zero
        try:
            word_vectors[i, :] = vocab_wvs[token]
        except (TypeError, KeyError, ValueError):
            # Original used a bare `except: pass`. Keep the best-effort
            # zero-row behavior but only for the expected failure modes:
            # TypeError (vocab_wvs is None), KeyError (out-of-vocabulary
            # token), ValueError (malformed/mis-sized vector).
            pass
    return word_vectors
# Machine-specific input/output locations. In each pair the *second*
# assignment overrides the first, so only the C:/ paths take effect; the D:/
# lines are kept as the alternate-drive locations.
# NOTE(review): consider selecting these via environment variables instead.
TKNED_DOCS_PATH = 'D:/Users/Patdanai/th-qasys-db/tokenized_wiki_corpus/'
TKNED_DOCS_PATH = 'C:/Users/Patdanai/Desktop/tokenized-th-wiki/'  # effective value
MAX_SENTENCE_LENGTH = 80  # max tokens gathered around an answer span
OUTPUT_PATH = 'D:/Users/Patdanai/th-qasys-db/positive_sentences/'
OUTPUT_PATH = 'C:/Users/Patdanai/Desktop/492/positive/positive_tokenized/'  # effective value
OUTPUT_PATH_NPY = 'D:/Users/Patdanai/th-qasys-db/positive_embedded/'
OUTPUT_PATH_NPY = 'C:/Users/Patdanai/Desktop/492/positive/positive_embedded/'  # effective value
wv_path = 'C:/Users/Patdanai/Desktop/261499-nlp/lab/cc.th.300.vec'  # fastText Thai vectors (text format)
if __name__ == "__main__":
    # Batch pipeline: (1) gather the vocabulary of the first `batch_size`
    # articles, (2) load just those fastText vectors into memory, then
    # (3) for each question cut answer-centred "positive" token windows and
    # save them as JSON plus their embedding matrices as NPY.
    # put wvs to memory
    batch_size = 4000
    start = 0
    batch_vocabs = []
    for i in range(start, batch_size + start):
        with open('%s%s.json' % (TKNED_DOCS_PATH, article_ids[i]), encoding='utf-8-sig') as f:
            current_doc = json.load(f)
        preprocessed_doc = preprocess_document(current_doc)
        batch_vocabs += preprocessed_doc
    batch_vocabs = set(batch_vocabs)
    batch_vocabs.remove('')
    vocab_wvs = get_vocab_wvs(wv_path, vocabs=batch_vocabs)
    for i in range(start, batch_size + start):
        with open('%s%s.json' % (TKNED_DOCS_PATH, article_ids[i]), encoding='utf-8-sig') as f:
            current_doc = json.load(f)
        # Which tokens carry the answer's boundary characters, plus each
        # token's (start, end) character offsets in the raw document.
        answer_masks, tokens_range = track_answer(answers_detail[i], current_doc)
        preprocessed_doc = preprocess_document(current_doc)
        answer_idx = []
        for j in range(len(answer_masks)):
            if(answer_masks[j]):
                answer_idx.append(j)
        positive_sample = []
        positive_sample_ans_masks = []
        positive_sample_char_range = []
        positive_sample_index = []
        first_ans_tk = answer_idx[0]
        # NOTE(review): `last_ans_tk` is read here before its first assignment
        # below. If this branch fires on the very first question it raises
        # NameError; on later questions it silently reuses the previous
        # question's value.
        if(preprocessed_doc[first_ans_tk] in ['', ' ']):
            first_ans_tk = last_ans_tk
        last_ans_tk = answer_idx[-1]
        if(preprocessed_doc[last_ans_tk] in ['', ' ']):
            last_ans_tk = first_ans_tk
        # Grow a window: step left from the first answer token and right from
        # the last, skipping blank tokens, until MAX_SENTENCE_LENGTH tokens
        # (or the whole document) have been collected.
        l_count = 0
        r_count = 0
        l_step = 0
        r_step = 0
        while(l_count + r_count < MAX_SENTENCE_LENGTH and l_count + r_count < len(preprocessed_doc)):
            try:
                l_token = preprocessed_doc[first_ans_tk + l_step]
                l_token_index = first_ans_tk + l_step
                l_token_mask = answer_masks[first_ans_tk + l_step]
                l_token_range = tokens_range[first_ans_tk + l_step]
                # NOTE(review): the bounds check comes after the indexing, so a
                # negative index has already wrapped around (Python semantics)
                # before it is caught here.
                if(first_ans_tk + l_step < 0):
                    l_step = 0
                elif(l_token in ['', ' ']):
                    l_step -= 1
                else:
                    positive_sample.insert(0, l_token)
                    positive_sample_ans_masks.insert(0, l_token_mask)
                    positive_sample_char_range.insert(0, l_token_range)
                    positive_sample_index.insert(0, l_token_index)
                    l_count += 1
                    l_step -= 1
            except IndexError:
                l_count += 1
            try:
                r_token = preprocessed_doc[last_ans_tk + r_step]
                r_token_index = last_ans_tk + r_step
                r_token_mask = answer_masks[last_ans_tk + r_step]
                r_token_range = tokens_range[last_ans_tk + r_step]
                if(last_ans_tk + r_step > len(preprocessed_doc) - 1):
                    pass
                elif(r_token in ['', ' ']):
                    r_step += 1
                else:
                    positive_sample.append(r_token)
                    positive_sample_ans_masks.append(r_token_mask)
                    positive_sample_char_range.append(r_token_range)
                    positive_sample_index.append(r_token_index)
                    r_count += 1
                    r_step += 1
            except IndexError:
                # Right edge exhausted: give the remaining budget to the left side.
                l_step += len(preprocessed_doc) - last_ans_tk - r_step
                r_step += 1
        # Slide up to sample_num//2 windows (stride 2) of words_per_sample
        # tokens across the collected span, each centred near the answer.
        words_per_sample = 40
        sample_num = 10
        embedded_sentences = []
        positive_samples = []
        start_idx = positive_sample_index.index(first_ans_tk) - words_per_sample // 2 - sample_num // 2
        for j in range(0, sample_num, 2):
            try:
                if(start_idx - j > -1 and start_idx + words_per_sample - j < len(positive_sample) - 1):
                    sample = positive_sample[start_idx - j:start_idx + words_per_sample - j]
                    sample_index = positive_sample_index[start_idx - j:start_idx + words_per_sample - j]
                    sample_char_range = positive_sample_char_range[start_idx - j:start_idx + words_per_sample - j]
                    mask = [0] * words_per_sample
                    if(last_ans_tk - first_ans_tk > 0):
                        for k in range(positive_sample_index.index(first_ans_tk) - start_idx + j, positive_sample_index.index(last_ans_tk) - start_idx + j):
                            mask[k] = 1
                    else:
                        mask[positive_sample_index.index(first_ans_tk) - start_idx + j] = 1
                else:
                    # Window does not fit: fall back to the whole span, then trim
                    # it down to words_per_sample (or pad it up) around the answer.
                    sample = positive_sample[:]
                    sample_index = positive_sample_index[:]
                    sample_char_range = positive_sample_char_range[:]
                    mask = [0] * len(positive_sample_index)
                    if(len(sample) > words_per_sample):
                        if(last_ans_tk - first_ans_tk > 0):
                            for k in range(positive_sample_index.index(first_ans_tk), positive_sample_index.index(last_ans_tk)):
                                mask[k] = 1
                        else:
                            mask[positive_sample_index.index(first_ans_tk)] = 1
                        l_removal, r_removal = 0, len(sample)
                        while(len(sample) > words_per_sample):
                            # NOTE(review): `temp_first` is only assigned inside this
                            # try; if the first .index() call raises, the except body
                            # reads an unbound (or stale) `temp_first`.
                            try:
                                temp_first = positive_sample_index.index(first_ans_tk)
                                temp_last = positive_sample_index.index(last_ans_tk)
                            except:
                                temp_last = temp_first
                            if(l_removal - 1 < r_removal - temp_last):
                                sample.pop()
                                sample_index.pop()
                                sample_char_range.pop()
                                mask.pop()
                                r_removal -= 1
                            elif(r_removal < l_removal - 1):
                                sample.pop(0)
                                sample_index.pop(0)
                                sample_char_range.pop(0)
                                mask.pop(0)
                                l_removal += 1
                            elif(r_removal - temp_last > 0):
                                sample.pop()
                                sample_index.pop()
                                sample_char_range.pop()
                                mask.pop()
                                r_removal -= 1
                            else:
                                sample.pop(0)
                                sample_index.pop(0)
                                sample_char_range.pop(0)
                                mask.pop(0)
                                l_removal += 1
                    else:
                        sample_ans_mask = positive_sample_ans_masks[:]
                        while(len(sample) < words_per_sample):
                            sample.insert(0, '<PAD>')
                            sample_ans_mask.insert(0, 0)
                            sample_char_range.insert(0, (-1, -1))
                            sample_index.insert(0, -1)
            except IndexError:
                exit('Index Error: from line 276')
            # NOTE(review): key 'sample_answer_maks' is misspelled, but it is
            # the on-disk schema consumers read — renaming is a breaking change.
            positive = {
                'article_id': article_ids[i],
                'question_id': i + 1,
                'sample_answer_maks': mask,
                'sample_character_range': sample_char_range,
                'sample_index': sample_index,
                'sample_sentence': sample,
            }
            positive_samples.append(positive)
            es = vectorize_tokens(sample, vocab_wvs=vocab_wvs)
            embedded_sentences.append(es)
        out_file_name = '%spositive_question%s.json' % (OUTPUT_PATH, i)
        out_file_name_npy = '%spositive_question%s.npy' % (OUTPUT_PATH_NPY, i)
        with open(out_file_name, 'w', encoding='utf-8-sig', errors='ignore') as f:
            json.dump(positive_samples, f, ensure_ascii=False)
        np.save(out_file_name_npy, np.asarray(embedded_sentences))
        print(i, 'positive:', np.array(embedded_sentences).shape)
        print()
| {
"alphanum_fraction": 0.5287872249,
"author": null,
"avg_line_length": 39.9482758621,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b8bd7ab8926a4e96bb1719741d3cd8eb985e6da5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-02-26T03:31:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-12-17T10:20:42.000Z",
"max_forks_repo_head_hexsha": "6ee4a5e13642e45e364e4813475c1be46afbecd5",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "patda9/th-qa-system-261491",
"max_forks_repo_path": "src/positive_generator.py",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "6ee4a5e13642e45e364e4813475c1be46afbecd5",
"max_issues_repo_issues_event_max_datetime": "2022-02-09T23:29:30.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-10-17T10:53:40.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "patda9/th-qa-system-261491",
"max_issues_repo_path": "src/positive_generator.py",
"max_line_length": 156,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "6ee4a5e13642e45e364e4813475c1be46afbecd5",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "patda9/th-qa-system-261491",
"max_stars_repo_path": "src/positive_generator.py",
"max_stars_repo_stars_event_max_datetime": "2021-07-16T18:10:31.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-10T06:48:42.000Z",
"num_tokens": 2593,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11585
} |
'''
Script to convert a MAF to a vcf4.2 file using python >=3.6.
Created by Ryan Schenck
8 March 2018
'''
import os
import sys
from optparse import OptionParser
import subprocess
from functools import wraps
import datetime
import time
import numpy as np
def OptionParsing():
    """Define and parse the command-line options.

    :return: (options, parser) tuple. Exits the process when any required
        argument (maf file, output directory, reference genome) is missing.
    """
    usage = 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>'
    parser = OptionParser(usage)
    parser.add_option('-i', '--input_maf', dest="maf", default=None, help=".maf file to be converted.")
    parser.add_option('-o', '--output_dir', dest="outDir", default=None, help="Output directory for .vcf file")
    parser.add_option('-r', '--ref_genome', dest="refGenome", default="/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa", help="Reference genome to be used for maf2vcf conversion.")
    parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help="Use this flag to verify reference matching to maf file. Default=False")
    parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help="Use this flag to turn on verbose mode. Default=False")
    options, _args = parser.parse_args()
    missing_required = options.maf is None or options.outDir is None or options.refGenome is None
    if missing_required:
        print("ERROR: Please include arguments for maf file, output directory, and reference genome (single fasta file).")
        sys.exit()
    return (options, parser)
def fn_timer(function):
    '''
    Decorator reporting the wall-clock runtime (in minutes) of each call to
    the wrapped function.
    :param function: Function of interest.
    :return: Wrapped function that times every invocation.
    '''
    @wraps(function)
    def timed_call(*args, **kwargs):
        began = time.time()
        outcome = function(*args, **kwargs)
        finished = time.time()
        minutes = str(round((finished - began) / 60., 2))
        print ("INFO: Total time running %s: %s minutes" %
               (function.__name__, minutes)
               )
        return outcome
    return timed_call
def UpdateProgressGetN(fileName):
    """Count the lines of fileName with `wc -l`, decompressing through gzip
    first when the name ends in 'z' (e.g. *.gz). Returns the count as an int."""
    if fileName[-1] == "z":
        cmd = "gzip -cd %s | wc -l" % (fileName)
    else:
        cmd = "wc -l %s" % (fileName)
    pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
    raw = pipe.read().decode("utf-8")
    # `wc -l file` prints "  <count> <file>"; strip padding, take first field.
    return int(raw.lstrip(" ").split(" ")[0])
def UpdateProgress(i, n, DisplayText):
    '''
    Prints a progress bar where appropriate.
    :param i: Current Step
    :param n: Total number of steps.
    :param DisplayText: A string that you want to print out that is informative.
    :return: None
    '''
    done_fraction = (i + 1) / n
    bar = '=' * int(20 * done_fraction)
    sys.stdout.write('\r')
    sys.stdout.write("[%-20s] %d%%\t INFO: %s" % (bar, 100 * done_fraction, DisplayText))
    sys.stdout.flush()
def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True):
    '''
    Fetch a reference sequence via `samtools faidx`, optionally verifying it
    against an expected sequence.
    :param check: When True, compare the fetched sequence to `ref` and exit the process on a mismatch.
    :param refGenome: Reference Fasta file
    :param genomicPos: Genomic Position of interest (e.g. "1:100-200").
    :param ref: Reference sequence to compare to the fetched sequence.
    :return: True on a successful check; the fetched sequence when check=False.
    '''
    faidx = subprocess.Popen(['samtools', 'faidx', refGenome, genomicPos], stdout=subprocess.PIPE)
    faidx.wait()
    # First output line is the FASTA header; the rest is the wrapped sequence.
    body = faidx.stdout.readlines()[1:]
    fetched = ''.join(piece.decode('utf-8').rstrip('\n') for piece in body)
    if not check:
        return fetched
    if fetched == ref:
        return True
    print('ERROR: May not be proper reference genome')
    print('ERROR: Improper reference. Found %s at %s. Reference genome shows %s' % (ref, genomicPos, fetched))
    sys.exit()
def SpotCheckProperReference(mafFile, Options, fileLength):
    '''
    Verifies that the supplied reference genome matches the maf file by
    comparing reported reference alleles against `samtools faidx` output.
    Intends to spot check 2% of a file of more than 200 variants (all of a
    smaller file).
    :param mafFile: Input mafFile object (opened)
    :param Options: Parser Options
    :param fileLength: Length of the file being read
    :return: True when every checked variant matched (SamtoolsFaidx exits the
        process itself on a mismatch)
    '''
    print("INFO: Verifying maf file.")
    if fileLength > 200:
        n=0.02
    else:
        n=1.
    a = np.arange(fileLength)
    np.random.shuffle(a)
    a = list(a[:int(fileLength*n)])
    i = 0
    count = 0
    for line in mafFile:
        # NOTE(review): despite building the random index list `a`, every
        # non-header line is checked until count == len(a) — the sampled
        # indices are never consulted (see the commented-out code below).
        if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:
            # checkIt = len([k for k in a if k==i])
            # if checkIt==1:
            UpdateProgress(count, len(a), "INFO: Verifying maf file")
            count+=1
            line = line.rstrip('\n').split('\t')
            genomicPos = line[1] + ":" + line[2] + "-" + line[3]
            ref = line[7]
            mutType = line[5]
            variantClass = line[6]
            # Insertions/TNP/ONP have no directly comparable reference bases.
            if variantClass != "INS" and variantClass != "TNP" and variantClass !="ONP":
                toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref)
            if count == len(a):
                print('')
                return(toContinue)
            # else:
            #     print(checkIt)
            #     print(line)
            #     print([k for k in a])
            #     sys.exit("Problem here")
        # NOTE(review): this elif repeats the condition above verbatim, so it
        # is unreachable dead code — the "No header found" error can never fire.
        elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:
            print("")
            print("ERROR: No header found in maf file.")
        elif line.startswith('Hugo_Symbol Chromosome Start_position') == True:
            toContinue = True
        else:
            sys.exit("What the fuck")
        i+=1
    print('')
    return(toContinue)
def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
    '''
    Build the VCF fields for a SNP maf line.
    NOTE(review): column indices assume the ICGC/PCAWG-style maf layout
    (e.g. 7=ref allele, 8/9=tumor alleles, 28=VAF, 38/39=alt/ref read counts,
    41=normal VAF, 42=quality, 44=project code) — confirm against the input.
    :return: list of VCF fields, or None when the line is written to errorFile instead.
    '''
    ref = line[7]
    tAllele1 = line[8] # Normal Allele
    tAllele2 = line[9] # Alt Allele
    QUAL = line[42]
    if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
        QUAL = '.'
    # NOTE(review): this assignment looks inverted — when ref == tAllele1, the
    # *alternative* allele is set to tAllele1 (i.e. to the reference base) and
    # REF gets tAllele2. Confirm the intended REF/ALT orientation before changing.
    if ref == tAllele1:
        altAllele = tAllele1
        refAllele = tAllele2
    else:
        altAllele = tAllele2
        refAllele = tAllele1
    ref_reads = line[39]
    alt_reads = line[38]
    reportedVAF = line[28]
    # Get phasing information and determine reads for vaf==1
    # NOTE(review): Python parses this as (ref_reads=='NA') or
    # (alt_reads=='NA' and reportedVAF=='1') — verify that is intended.
    if ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == '1':
        GT = "1/1" # Appears to be homozygous for alternative allele (germline unlikely since it is called w.r.t normal?)
        vaf = reportedVAF # Sets VAF equal to 1
        if ref_reads == 'NA':
            ref_reads = '.'
            total_reads = alt_reads
        else:
            alt_reads = '.'
            total_reads = ref_reads
        sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
    # Tossing these very strange mutations within the MAF file.
    elif ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == 'NA':
        with open(errorFile, 'a') as errerOut:
            errerOut.write('\t'.join(line)+'\n')
        if Options.verbose:
            print("WARNING: %s" % '\t'.join(line))
        return(None)
    # Simple SNV cases
    else:
        total_reads = str(int(ref_reads) + int(alt_reads))
        vaf = repr(round(int(alt_reads) / float(total_reads), 4))
        # NOTE(review): precedence — parsed as (vaf != '1.' and strand=="+")
        # or (strand=="-"); a '-'-strand variant is always phased "0|1".
        if vaf != '1.' and strand=="+" or strand=="-":
            GT="0|1"
        else:
            GT="0/1"
        sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
    # Last check for interesting but unresolved MAF line
    if (ref != tAllele1 and ref != tAllele2) or (strand != '+' and strand != '-'):
        with open(errorFile, 'a') as errerOut:
            errerOut.write('\t'.join(line)+'\n')
        if Options.verbose:
            print("WARNING: %s" % '\t'.join(line))
        return(None)
    # Create INFO field
    INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType +";DCC_Project_Code=" + line[44]
    # Normal variant field if anything
    if line[41]=="NA":
        normalGenotype = ".:.,.:.:."
    else:
        normalGenotype = ".:.,.:.:%s"%(line[41])
    # Final vcf line out
    lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
    return(lineOut)
def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
    '''
    Build the VCF fields for a deletion (DEL) maf line. VCF convention anchors
    an indel on the base immediately preceding it, so the reference sequence
    is re-fetched with samtools starting one base to the left.
    NOTE(review): column indices assume the ICGC/PCAWG-style maf layout — confirm.
    :return: list of VCF fields (exits the process on a reference mismatch).
    '''
    ref = line[7]
    tAllele1 = line[8] # Normal Allele Typically
    tAllele2 = line[9] # Alt Allele Typically
    QUAL = line[42]
    if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
        QUAL = '.'
    # NOTE(review): as in processSNP this looks inverted (altAllele receives
    # the value equal to ref); the refSeq[1:] != altAllele check below depends
    # on it, so confirm the orientation before changing either place.
    if ref == tAllele1:
        altAllele = tAllele1
        refAllele = tAllele2
    else:
        altAllele = tAllele2
        refAllele = tAllele1
    # Obtain the reference sequence + 1 preceding base for the DEL
    refAnchorPos = str(int(pos)-1) # Fetch the base that precedes the deletion.
    refSeq = SamtoolsFaidx(Options.refGenome, chrom + ":" + refAnchorPos + "-" + line[3], check=False)
    if refSeq[1:] != altAllele:
        print("ERROR: Deletion alternative allele does not match reference sequence. %s" % ('\t'.join(line)))
        sys.exit()
    # VCF reference is the preceding base plus the reported deletion in the MAF file.
    vcfRef = refSeq
    # VCF has base directly preceding the deletion as the alternative base and the variant pos
    vcfAlt=refSeq[0]
    vcfPos=refAnchorPos
    # Get read information
    iref_reads = line[37]
    ialt_reads = line[36]
    ref_reads = line[39]
    alt_reads = line[38]
    reportedVAF = line[28]
    i_t_vaf = line[43]
    # Get phasing information and determine reads for vaf==1
    if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'):
        GT="0/1"
        # Prefer whichever of the two depth columns is populated.
        ref_reads = [read for read in [ref_reads, iref_reads] if read != "NA"][0]
        alt_reads = [read for read in [alt_reads, ialt_reads] if read != "NA"][0]
        total_reads = str(int(ref_reads) + int(alt_reads))
        vaf = str(int(alt_reads)/float(total_reads))
    elif i_t_vaf!="" and i_t_vaf!="NA" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':
        # Only the caller-reported tumour VAF is available; emit it without depths.
        vaf=i_t_vaf
        GT="./."
        ref_reads = '.'
        alt_reads = '.'
        total_reads = '.'
    elif (i_t_vaf=="" or i_t_vaf=="NA") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':
        # No usable depth or VAF information at all.
        GT='./.'
        ref_reads='.'
        alt_reads='.'
        total_reads='.'
        vaf='.'
    else:
        sys.exit("ERROR: Problem processing DEL %s"%('\t'.join(line)))
    sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
    # Create INFO field
    INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[
        14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType + ";DCC_Project_Code=" + \
           line[44]
    # Normal variant field if anything
    if line[41] == "NA":
        normalGenotype = ".:.,.:.:."
    else:
        normalGenotype = ".:.,.:.:%s" % (line[41])
    lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
    return(lineOut)
def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):
    '''
    Build the VCF fields for an insertion (INS) maf line. VCF convention puts
    the base immediately preceding the insertion in REF and prepends it to
    the inserted sequence in ALT, so the anchor base is fetched with samtools.
    NOTE(review): column indices assume the ICGC/PCAWG-style maf layout — confirm.
    :return: list of VCF fields.
    '''
    ref = line[7]
    tAllele1 = line[8] # Normal Allele Typically
    tAllele2 = line[9] # Alt Allele Typically
    QUAL = line[42]
    if QUAL == 'None' or QUAL == 'NA' or QUAL == '':
        QUAL = '.'
    # For insertions the MAF marks the non-inserted allele with '-'.
    if tAllele1 == '-':
        altAllele = tAllele2
    else:
        altAllele = tAllele1
    # Obtain the reference anchor base for the INS (original comment said DEL — copy/paste artifact).
    refAnchorPos = str(int(pos) - 1) # Fetch the base that precedes the insertion.
    refSeq = SamtoolsFaidx(Options.refGenome, chrom + ":" + refAnchorPos + "-" + line[3], check=False)
    # VCF reference is the preceding base in the insertion in MAF
    vcfRef = refSeq[0]
    # VCF has base directly preceding the insertion as the alternative base and the variant pos
    vcfAlt = refSeq[0]+altAllele
    vcfPos = refAnchorPos
    # Get read information
    iref_reads = line[37]
    ialt_reads = line[36]
    ref_reads = line[39]
    alt_reads = line[38]
    reportedVAF = line[28]
    i_t_vaf = line[43]
    # Get phasing information and determine reads for vaf==1
    if (ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'):
        GT = "0/1"
        # Prefer whichever of the two depth columns is populated.
        ref_reads = [read for read in [ref_reads, iref_reads] if read != "NA"][0]
        alt_reads = [read for read in [alt_reads, ialt_reads] if read != "NA"][0]
        total_reads = str(int(ref_reads) + int(alt_reads))
        vaf = str(int(alt_reads) / float(total_reads))
    elif i_t_vaf != "" and i_t_vaf != "NA" and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA':
        # Only the caller-reported tumour VAF is available; emit it without depths.
        vaf = i_t_vaf
        GT = "./."
        ref_reads = '.'
        alt_reads = '.'
        total_reads = '.'
    elif (
            i_t_vaf == "" or i_t_vaf == "NA") and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA':
        # No usable depth or VAF information at all.
        GT = './.'
        ref_reads = '.'
        alt_reads = '.'
        total_reads = '.'
        vaf = '.'
    else:
        sys.exit("ERROR: Problem processing INS %s" % ('\t'.join(line)))
    sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])
    # Create INFO field
    INFO = "MAF_Hugo_Symbol=" + line[0] + ";MAF_ref_context=" + line[15].upper() + ";MAF_Genome_Change=" + line[
        14] + ";MAF_Variant_Type=" + variantType + ";MAF_Variant_Classification=" + mutType + ";DCC_Project_Code=" + \
           line[44]
    # Normal variant field if anything
    if line[41] == "NA":
        normalGenotype = ".:.,.:.:."
    else:
        normalGenotype = ".:.,.:.:%s" % (line[41])
    lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, "GT:AD:DP:VF", normalGenotype, sampleField]
    return (lineOut)
def CreateVCFLine(line, errorFile, Options):
    """Convert one tab-separated MAF data line into a list of VCF fields.

    Dispatches on the Variant_Type column to the SNP/DEL/INS processors.
    TNP/ONP and malformed records are appended to ``errorFile`` and None is
    returned; a bad rsID column aborts the process.

    :param line: raw MAF line (with trailing newline).
    :param errorFile: path of the maf file collecting ignored records.
    :param Options: parsed options (``verbose`` flag is consulted here).
    :return: VCF field list, or None when the record was routed to errorFile.
    """
    line = line.rstrip('\n').split('\t')
    # Genomic Position. (Removed the unused `id` local, which shadowed the
    # builtin; line[10] is only needed as the rsID below.)
    chrom, pos = line[1], line[2]
    # Get rs ID
    rsid = line[10]
    if rsid == '':
        rsid = '.'
    elif not rsid.startswith("rs"):
        if Options.verbose:
            print("ERROR: %s"%(line))
        sys.exit("ERROR: Problem in id column")
    # Strand Information
    strand = line[4]
    # Variant Classification/Type (Type is SNP, INS, DEL, etc.)
    mutType = line[5]
    variantType = line[6]
    # Create proper vcf formatted information
    if mutType == '':
        mutType = '.'
    if variantType == '':
        variantType = '.'
    # Determine type of variant to continue processing.
    linetowrite = None
    if variantType=="SNP":
        linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
    elif variantType=="DEL":
        linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
    elif variantType=="INS":
        linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)
    elif variantType=="TNP" or variantType=="ONP":
        # Multi-nucleotide variants are not convertible here; log for review.
        with open(errorFile, 'a') as errerOut:
            errerOut.write('\t'.join(line)+'\n')
    else: # This may seem duplicitious, but I explicityly want to know as much of what I'm choosing to filter out as possible...
        if Options.verbose:
            print("WARNING: Malformed MAF entry. %s"%('\t'.join(line)))
            print('')
        with open(errorFile, 'a') as errerOut:
            errerOut.write('\t'.join(line)+'\n')
    return(linetowrite)
def CreateHeader(ioObject, Options, tumorID, normalID):
    """Write the VCF v4.2 meta-information lines and the column header.

    :param ioObject: open, writable file-like object.
    :param Options: parsed options; Options.refGenome is recorded in the header.
    :param tumorID: tumor sample column name (written last).
    :param normalID: normal sample column name (written first, per ##sampleColumns).
    """
    today = datetime.datetime.now().date()
    meta = [
        "##fileformat=VCFv4.2",
        "##fileDate=%s" % (today),
        "##source=maf2vcf.py",
        "##reference=%s" % (Options.refGenome),
        "##sampleColumns=Normal.Tumor",
        '##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description="HUGO Symbol in original MAF file.">',
        '##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description="Reference context in original MAF file.">',
        '##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description="Genome change in original MAF file.">',
        '##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description="Variant type (SNP,INS,DEL) in original MAF file.">',
        '##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description="Variant Classification (if SNP) in original MAF file.">',
        '##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description="DCC Project Code in original MAF file.">',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">',
        '##FORMAT=<ID=AD,Number=2,Type=Integer,Description="Allelic depths of REF and ALT(s) in the order listed">',
        '##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Total read depth across this site">',
        '##FORMAT=<ID=VF,Number=1,Type=Float,Description="Variant Allele Frequency.">',
    ]
    for entry in meta:
        ioObject.write(entry + "\n")
    ioObject.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\t%s\n" % (normalID, tumorID))
@fn_timer
def ProcessFile(Options):
    """Drive the conversion: optional reference spot-check, sample-ID and
    header extraction, per-line MAF->VCF conversion, then shell-based
    coordinate sort + gzip of the resulting VCF.
    """
    n = UpdateProgressGetN(Options.maf)
    if Options.spotcheck:
        with open(Options.maf, 'r') as inFile:
            SpotCheckProperReference(inFile, Options, n)
    # Pull the tumor/normal sample IDs from the first data line (line index 1);
    # line 0 (the column header) is kept to seed the ignored-records file.
    with open(Options.maf,'r') as inFile:
        i = 0
        for line in inFile:
            if i == 1:
                toPullIDs = line.rstrip('\n').split('\t')
                break
            else:
                header = line
            i+=1
    # NOTE(review): columns 12/13 are assumed to hold the tumor/normal sample
    # barcodes — confirm against the maf flavor in use.
    tumorID = toPullIDs[12]
    normalID = toPullIDs[13]
    count = 0
    i = 0
    with open(Options.maf, 'r') as inFile:
        with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF:
            # Records that cannot be converted are appended here (seeded with the maf header).
            errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf')
            with open(errorFile, 'w') as errorOut:
                errorOut.write(header)
            CreateHeader(outVCF, Options, tumorID, normalID)
            for line in inFile:
                UpdateProgress(i, n, "Processing Maf File")
                if line.startswith('Hugo_Symbol Chromosome Start_position'):
                    count+=1
                    i += 1
                else:
                    i += 1
                    linetoWrite = CreateVCFLine(line, errorFile, Options)
                    if linetoWrite is not None:
                        outVCF.write('\t'.join(linetoWrite)+'\n')
    print('')
    print("INFO: Sorting vcf file.")
    # Header lines pass through untouched; data lines are coordinate-sorted
    # (chrom, then numeric position) and the result is gzipped.
    vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf')
    vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz')
    os.system("cat %s | awk '$1 ~ /^#/ {print $0;next} {print $0 | \"LC_ALL=C sort -k1,1 -k2,2n\"}' | gzip > %s"%(vcfFile, vcfFileSorted))
    os.system("rm %s"%(vcfFile))
    os.system("gzip %s"%(errorFile))
def main():
    """Entry point: parse command-line options and run the MAF -> VCF conversion."""
    print("INFO: Processing MAF file.")
    # Removed the unused FilePath local (os.path.dirname(...) was computed
    # but never read).
    (Options, Parser) = OptionParsing()
    ProcessFile(Options)
if __name__=="__main__":
main() | {
"alphanum_fraction": 0.6060713025,
"author": null,
"avg_line_length": 39.5828343313,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b59936a6f554ea3fca68db92693b83b6d3fc07dc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "797fdeee8448397b098c87cddb0e211b7b69c0bc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rschenck/DPhilRotation1Part2",
"max_forks_repo_path": "DataGrooming/maf2vcf.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "797fdeee8448397b098c87cddb0e211b7b69c0bc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rschenck/DPhilRotation1Part2",
"max_issues_repo_path": "DataGrooming/maf2vcf.py",
"max_line_length": 228,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "797fdeee8448397b098c87cddb0e211b7b69c0bc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rschenck/DPhilRotation1Part2",
"max_stars_repo_path": "DataGrooming/maf2vcf.py",
"max_stars_repo_stars_event_max_datetime": "2021-02-10T05:22:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-10T05:22:24.000Z",
"num_tokens": 5584,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 19831
} |
#ifndef OPENMC_VOLUME_CALC_H
#define OPENMC_VOLUME_CALC_H
#include "openmc/position.h"
#include "openmc/tallies/trigger.h"
#include "pugixml.hpp"
#include "xtensor/xtensor.hpp"
#include <array>
#include <string>
#include <vector>
#include <gsl/gsl>
namespace openmc {
//==============================================================================
// Volume calculation class
//==============================================================================

//! \brief Stochastic (point-sampling) estimation of domain volumes and of the
//! average nuclide content within each domain, configured from an XML node.
class VolumeCalculation {

public:
  // Aliases, types

  //! Results accumulated for a single domain
  struct Result {
    std::array<double, 2> volume; //!< Mean/standard deviation of volume
    std::vector<int> nuclides; //!< Index of nuclides
    std::vector<double> atoms; //!< Number of atoms for each nuclide
    std::vector<double> uncertainty; //!< Uncertainty on number of atoms
    int iterations; //!< Number of iterations needed to obtain the results
  }; // Results for a single domain

  // Constructors

  //! Configure the calculation from a <volume_calc> XML node.
  VolumeCalculation(pugi::xml_node node);

  // Methods

  //! \brief Stochastically determine the volume of a set of domains along with the
  //! average number densities of nuclides within the domain
  //
  //! \return Vector of results for each user-specified domain
  std::vector<Result> execute() const;

  //! \brief Write volume calculation results to HDF5 file
  //
  //! \param[in] filename Path to HDF5 file to write
  //! \param[in] results Vector of results for each domain
  void to_hdf5(const std::string& filename, const std::vector<Result>& results) const;

  //! Kind of geometric domain the volumes are tallied over.
  enum class TallyDomain {
    UNIVERSE,
    MATERIAL,
    CELL
  };

  // Data members
  TallyDomain domain_type_; //!< Type of domain (cell, material, etc.)
  size_t n_samples_; //!< Number of samples to use
  double threshold_ {-1.0}; //!< Error threshold for domain volumes (-1 = disabled)
  TriggerMetric trigger_type_ {TriggerMetric::not_active}; //!< Trigger metric for the volume calculation
  Position lower_left_; //!< Lower-left position of bounding box
  Position upper_right_; //!< Upper-right position of bounding box
  std::vector<int> domain_ids_; //!< IDs of domains to find volumes of

private:
  //! \brief Check whether a material has already been hit for a given domain.
  //! If not, add new entries to the vectors
  //
  //! \param[in] i_material Index in global materials vector
  //! \param[in,out] indices Vector of material indices
  //! \param[in,out] hits Number of hits corresponding to each material
  void check_hit(int i_material, std::vector<int>& indices,
    std::vector<int>& hits) const;
};
//==============================================================================
// Global variables
//==============================================================================
namespace model {
//! Volume calculations specified in the user input
extern std::vector<VolumeCalculation> volume_calcs;
}
//==============================================================================
// Non-member functions
//==============================================================================
//! Free memory associated with volume calculations
//! (presumably clears model::volume_calcs -- defined in the .cpp; verify there)
void free_memory_volume();
} // namespace openmc
#endif // OPENMC_VOLUME_CALC_H
| {
"alphanum_fraction": 0.6031078019,
"author": null,
"avg_line_length": 32.5157894737,
"converted": null,
"ext": "h",
"file": null,
"hexsha": "3258be9d6b583ed2d0fbe7f264002426da978551",
"include": null,
"lang": "C",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2020-03-22T20:54:48.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-07-31T21:03:25.000Z",
"max_forks_repo_head_hexsha": "ffe0f0283a81d32759e4f877909bbb64d5ad0d3d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mehmeturkmen/openmc",
"max_forks_repo_path": "include/openmc/volume_calc.h",
"max_issues_count": 9,
"max_issues_repo_head_hexsha": "ffe0f0283a81d32759e4f877909bbb64d5ad0d3d",
"max_issues_repo_issues_event_max_datetime": "2021-04-01T15:23:23.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-03-14T12:18:06.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mehmeturkmen/openmc",
"max_issues_repo_path": "include/openmc/volume_calc.h",
"max_line_length": 105,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "c5f66a3af5c1a57087e330f7b870e89a82267e4b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Hit-Weixg/openmc",
"max_stars_repo_path": "include/openmc/volume_calc.h",
"max_stars_repo_stars_event_max_datetime": "2019-05-05T10:18:12.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-01-10T13:14:35.000Z",
"num_tokens": 644,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3089
} |
module io
  ! Input/output helpers: read the simulation parameters and cross-section
  ! data files, and release the module-level buffers afterwards.
  use, intrinsic :: iso_fortran_env, only : REAL64, INT16, INT32, INT64, stdout=>output_unit, stdin=>input_unit

  integer NDataFiles                          ! number of cross-section data files
  character(len=25), allocatable :: fnames(:) ! data file names
  integer, allocatable :: nrd(:)              ! number of data rows per file
  real(REAL64), allocatable :: raw(:,:,:)     ! raw (energy, cross-section) table per file
  real(REAL64), allocatable :: e0raw(:), e1raw(:), Araw(:), Qraw(:)
  integer, allocatable :: productsraw(:)
  character(len=*), parameter, public :: paramformat = '((A),(Es9.3),(A8))'
  integer, parameter :: in_f = 11, cs_f = 13, eedf_f = 17, rate_f = 19, pops_f = 23

contains

  !> Read run parameters from stdin, then load every cross-section data
  !> file into the module-level arrays.
  subroutine read_program_input(Nruns, tfin, dt, e0, p, Npops, neexpr)
    implicit none
    integer row, col, i
    integer(INT64) Nruns
    real(REAL64) tfin, dt, e0, p, NRunsreal
    integer Npops
    character(len=*) neexpr

    read(stdin,*) NRunsreal, tfin, dt, e0, p
    read(stdin,*) neexpr
    read(stdin,*) NDataFiles, Npops

    allocate(fnames(NDataFiles))
    allocate(nrd(NDataFiles))
    allocate(e0raw(NDataFiles))
    allocate(e1raw(NDataFiles))
    allocate(productsraw(NDataFiles))
    allocate(Araw(Npops), Qraw(Npops))

    read(stdin,*) Araw, Qraw
    read(stdin,*) fnames

    ! Warn about overflow BEFORE converting, and compare against the largest
    ! representable INT64.  The previous check compared NRunsreal against
    ! huge(NRunsreal), which a finite REAL64 can never exceed, so it never
    ! fired; it also ran after the (already truncated) conversion.
    if (NRunsreal .gt. real(huge(Nruns), REAL64)) then
      write(stdout,*) "# WARNING: overflow in input N - lower the number of simulated particles!"
    end if
    ! Explicit INT64 kind: plain int() converts to the default integer kind,
    ! silently truncating large particle counts.
    Nruns = int(NRunsreal, kind=INT64)

    ! first pass over data files: count line numbers
    do i=1, NDataFiles
      nrd(i) = lines_in_file(trim(fnames(i)))-1
    end do

    ! allocate space for raw data
    allocate(raw(maxval(nrd),2,NDataFiles))

    ! second pass over data files: read data into memory
    do i=1, NDataFiles
      ! open data file
      open(cs_f, file=trim(fnames(i)), status='old')
      ! read metadata
      read(unit=cs_f,fmt=*) e0raw(i), e1raw(i), productsraw(i)
      ! read cross-sections
      read(unit=cs_f,fmt=*) ((raw(row,col,i),col=1,2),row=1,nrd(i))
      ! Close the file so the handle can be reused
      close(cs_f)
    end do

    ! Rescale the data to SI units - expecting cross-sections in cm^2
    raw(:,2,:) = raw(:,2,:)*1.0e-4
  end subroutine read_program_input

  !> Free every module-level allocatable populated by read_program_input.
  subroutine clean_up_io()
    implicit none
    deallocate(e0raw)
    deallocate(e1raw)
    deallocate(fnames)
    deallocate(nrd)
    deallocate(productsraw)
    deallocate(raw)
    ! Araw/Qraw are allocated in read_program_input as well; free them too so
    ! repeated init/cleanup cycles do not leak (guarded in case they were
    ! never allocated).
    if (allocated(Araw)) deallocate(Araw)
    if (allocated(Qraw)) deallocate(Qraw)
  end subroutine clean_up_io

  !> Count the number of lines in the named file.
  function lines_in_file(fname)
    implicit none
    integer lines_in_file, stat
    character*(*) fname
    character*120 line
    open(20, file=fname)
    lines_in_file = 0
    read (20,*,iostat=stat) line
    do while(stat.eq.0)
      lines_in_file = lines_in_file+1
      read (20,*,iostat=stat) line
    enddo
    close(20)
  end function lines_in_file

  !> Left-justify *string* and blank-pad (or truncate) it to *length* chars.
  function padr(string, length) result(r)
    character(len=*) :: string
    integer :: length
    character(len=length) :: r
    r = adjustl(string)
  end function padr

end module io
| {
"alphanum_fraction": 0.5502941176,
"author": null,
"avg_line_length": 29.3103448276,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "225fb1f5c70f6f264a66d4faec466ecb2e06ba5f",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e336148d66879b225f2c0499526f56d304b3e8d6",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "tomasaschan/bari-montecarlo",
"max_forks_repo_path": "src/io.f",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "e336148d66879b225f2c0499526f56d304b3e8d6",
"max_issues_repo_issues_event_max_datetime": "2017-09-21T14:21:43.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-21T14:21:43.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "tomasaschan/bari-montecarlo",
"max_issues_repo_path": "src/io.f",
"max_line_length": 117,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "e336148d66879b225f2c0499526f56d304b3e8d6",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "tlycken/bari-montecarlo",
"max_stars_repo_path": "src/io.f",
"max_stars_repo_stars_event_max_datetime": "2018-03-29T14:12:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-25T12:20:30.000Z",
"num_tokens": 879,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3400
} |
"""
DataModule for the OGB "ogbl-collab" (collaboration network) dataset
DataModule for link-prediction
"""
from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union
import dgl
import numpy as np
import pytorch_lightning as pl
import torch
from dgl.data.graph_serialize import load_labels_v2
from dgl.dataloading.negative_sampler import Uniform
from ogb.linkproppred import DglLinkPropPredDataset
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from scipy.sparse.csgraph import shortest_path
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from zero_to_hero.data.statistics import standardize_data
class GraphDataSet(Dataset):
    """
    Dataset over pre-sampled subgraphs stored in a DGL binary file.

    Only the link labels are held in memory; each subgraph is loaded
    lazily from disk when indexed.
    """

    def __init__(self, path: Union[Path, str]) -> None:
        self.path = str(path)
        self.links = load_labels_v2(filename=self.path)["links"]

    def __len__(self) -> int:
        return self.links.shape[0]

    def __getitem__(self, index: int) -> Tuple[dgl.DGLHeteroGraph, torch.Tensor]:
        # Load exactly one graph from the file rather than the whole list.
        loaded, _ = dgl.load_graphs(filename=self.path, idx_list=[index])
        return loaded[0], self.links[index]
class EdgeDataSet(Dataset):
    """
    Lightweight dataset over candidate edges.

    Indexing applies ``transform`` to a single edge, building its
    enclosing subgraph on the fly so a DataLoader can parallelise the
    sampling work.
    """

    def __init__(
        self,
        edges: torch.Tensor,
        links: torch.Tensor,
        transform: Callable[[torch.Tensor], dgl.DGLHeteroGraph],
    ) -> None:
        self.edges = edges
        self.transform = transform
        self.links = links

    def __len__(self) -> int:
        return self.edges.shape[0]

    def __getitem__(self, index: int) -> Tuple[dgl.DGLHeteroGraph, torch.Tensor]:
        sampled = self.transform(self.edges[index])
        return sampled, self.links[index]
def double_radius_node_labeling(subgraph: dgl.DGLHeteroGraph, src: int, dst: int) -> torch.Tensor:
    """
    Double Radius Node Labeling (DRNL), as used by SEAL for link prediction.

    With r(i, u) the shortest-path distance from node i to node u:
        d = r(i, src) + r(i, dst)
        node_label = 1 + min(r(i, src), r(i, dst)) + (d // 2) * (d // 2 + d % 2 - 1)
    Isolated nodes in subgraph will be set as zero.
    Extreme large graph may cause memory error (dense adjacency below is O(n^2)).
    Args:
        subgraph(DGLGraph): The graph
        src(int): node id of one of src node in new subgraph
        dst(int): node id of one of dst node in new subgraph
    Returns:
        node_label(Tensor): node labeling tensor (dtype long)
    """
    adj = subgraph.adj().to_dense().numpy()
    # Canonicalise the pair so src < dst; the index arithmetic below relies on it.
    src, dst = (dst, src) if src > dst else (src, dst)
    # Build adjacency with src's row/column removed, and likewise for dst:
    # distances to one endpoint are computed on the graph WITHOUT the other
    # endpoint, so paths through the target pair do not contaminate the labels.
    idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
    adj_wo_src = adj[idx, :][:, idx]
    idx = list(range(dst)) + list(range(dst + 1, adj.shape[0]))
    adj_wo_dst = adj[idx, :][:, idx]
    dist2src = shortest_path(adj_wo_dst, directed=False, unweighted=True, indices=src)
    # Re-insert a 0 at dst's slot so indices align with the full subgraph again.
    dist2src = np.insert(dist2src, dst, 0, axis=0)
    dist2src = torch.from_numpy(dist2src)
    # In adj_wo_src, dst's index has shifted down by one (src < dst was removed).
    dist2dst = shortest_path(adj_wo_src, directed=False, unweighted=True, indices=dst - 1)
    dist2dst = np.insert(dist2dst, src, 0, axis=0)
    dist2dst = torch.from_numpy(dist2dst)
    dist = dist2src + dist2dst
    dist_over_2, dist_mod_2 = dist // 2, dist % 2
    node_label = 1 + torch.min(dist2src, dist2dst)
    node_label += dist_over_2 * (dist_over_2 + dist_mod_2 - 1)
    # The two target nodes themselves are always labeled 1.
    node_label[src] = 1.0
    node_label[dst] = 1.0
    # Unreachable nodes have infinite distances; the arithmetic above turns
    # those into NaN, which is mapped to the "isolated" label 0.
    node_label[torch.isnan(node_label)] = 0.0
    return node_label.to(torch.long)
class CollabDataModule(pl.LightningDataModule):
    """
    PyTorch Lightning DataModule for SEAL-style link prediction on the
    OGB dataset "ogbl-collab" (the class previously claimed to target
    Drug-Drug-Interaction, but the code loads ogbl-collab).

    setup() samples enclosing subgraphs for positive/negative edges and
    caches them to disk; the cached files are reused on later runs.
    """

    def __init__(self, config: Dict) -> None:
        super().__init__()
        self.config = config
        # One GraphDataSet per phase, populated lazily by setup().
        self.dataset: Dict[str, Optional[GraphDataSet]] = {
            "train": None,
            "valid": None,
            "test": None,
            "predict": None,
        }
        # Node features and per-phase edge weights, filled in setup().
        self.ndata: torch.Tensor = torch.empty(0)
        self.edata: Dict[str, torch.Tensor] = {
            "fit": torch.empty(0),
            "test": torch.empty(0),
            "predict": torch.empty(0),
        }

    def prepare_data(self) -> None:
        # Instantiating the OGB dataset triggers the download/extraction.
        # Run once in a single process; setup() re-opens it per process.
        DglLinkPropPredDataset(
            name="ogbl-collab",
            root="data",
        )

    def setup(self, stage: Optional[str] = None) -> None:
        """Build (or load cached) subgraph datasets for the given stage.

        stage: one of "train"/"fit", "test", "predict", or None (all).
        """
        dataset = DglLinkPropPredDataset(
            name="ogbl-collab",
            root="data",
        )
        # "fit" graph: training edges only (dataset[0]).
        multi_graph: Dict[str, dgl.DGLHeteroGraph] = {"fit": dataset[0]}
        split_edge = dataset.get_edge_split()
        multi_graph["fit"] = dgl.add_self_loop(g=multi_graph["fit"])
        for key, values in multi_graph["fit"].edata.items():
            multi_graph["fit"].edata[key] = values.float()
        # "test" graph: optionally augmented with the validation edges.
        multi_graph["test"] = deepcopy(multi_graph["fit"])
        if self.config["data"]["test_graph_with_valid_edges"]:
            multi_graph["test"] = dgl.add_edges(
                g=multi_graph["fit"],
                u=split_edge["valid"]["edge"][:, 0],
                v=split_edge["valid"]["edge"][:, 1],
                data={
                    "weight": split_edge["valid"]["weight"].unsqueeze(1).float(),
                    "year": split_edge["valid"]["year"].unsqueeze(1).float(),
                },
            )
        # "predict" graph: optionally augmented further with the test edges.
        multi_graph["predict"] = deepcopy(multi_graph["test"])
        if self.config["data"]["predict_graph_with_test_edges"]:
            multi_graph["predict"] = dgl.add_edges(
                g=multi_graph["test"],
                u=split_edge["test"]["edge"][:, 0],
                v=split_edge["test"]["edge"][:, 1],
                data={
                    "weight": split_edge["test"]["weight"].unsqueeze(1).float(),
                    "year": split_edge["test"]["year"].unsqueeze(1).float(),
                },
            )
        # Collapse parallel edges, summing their features.
        simple_graph = {
            phase: dgl.to_simple(
                g=multi_graph[phase],
                copy_ndata=True,
                copy_edata=True,
                aggregator="sum",
            )
            for phase in ["fit", "test", "predict"]
        }
        ndata = simple_graph["fit"].ndata["feat"]
        if self.config["data"]["standardize_data"]:
            ndata = torch.from_numpy(
                standardize_data(
                    data=ndata.numpy(),
                    return_moments=False,
                )
            )
        self.ndata = ndata
        self.edata = {phase: simple_graph[phase].edata["weight"].float() for phase in ["fit", "test", "predict"]}
        # Strip features before subgraph sampling; ndata/edata are kept on self.
        for phase in ["fit", "test", "predict"]:
            simple_graph[phase].ndata.clear()
            simple_graph[phase].edata.clear()
        if stage in ("train", "fit", None):
            # Train
            path = (
                Path("data/ogbl_collab_seal")
                / f"train_{self.config['data']['hop']}-hop_{self.config['data']['subsample_train_ratio']}-subsample.bin"
            )
            # Sample subgraphs only on a cache miss; otherwise reuse the file.
            if not path.exists():
                edges, links = self.generate_edges_and_links(
                    split_edge=split_edge,
                    graph=simple_graph["fit"],
                    phase="train",
                )
                graph_list, links = self.generate_list_of_graphs_and_links(
                    edges=edges,
                    links=links,
                    graph=simple_graph["fit"],
                )
                dgl.save_graphs(str(path), graph_list, {"links": links})
            self.dataset["train"] = GraphDataSet(path=path)
            # Valid
            path = (
                Path("data/ogbl_collab_seal") / f"valid_{self.config['data']['hop']}-hop_"
                f"{self.config['data']['subsample_test_ratio']}-subsample.bin"
            )
            if not path.exists():
                edges, links = self.generate_edges_and_links(
                    split_edge=split_edge,
                    graph=simple_graph["fit"],
                    phase="valid",
                )
                graph_list, links = self.generate_list_of_graphs_and_links(
                    edges=edges,
                    links=links,
                    graph=simple_graph["fit"],
                )
                dgl.save_graphs(str(path), graph_list, {"links": links})
            self.dataset["valid"] = GraphDataSet(path=path)
        if stage in ("test", None):
            # Cache file name records whether valid edges were merged in.
            path = (
                Path("data/ogbl_collab_seal") / f"test_{self.config['data']['hop']}-hop_"
                f"{'with' if self.config['data']['test_graph_with_valid_edges'] else 'without'}-valid-edges_"
                f"{self.config['data']['subsample_test_ratio']}-subsample.bin"
            )
            if not path.exists():
                edges, links = self.generate_edges_and_links(
                    split_edge=split_edge,
                    graph=simple_graph["test"],
                    phase="test",
                )
                graph_list, links = self.generate_list_of_graphs_and_links(
                    edges=edges,
                    links=links,
                    graph=simple_graph["test"],
                )
                dgl.save_graphs(str(path), graph_list, {"links": links})
            self.dataset["test"] = GraphDataSet(path=path)
        if stage in ("predict", None):
            path = (
                Path("data/ogbl_collab_seal") / f"predict_{self.config['data']['hop']}-hop_"
                f"{'with' if self.config['data']['test_graph_with_valid_edges'] else 'without'}-valid-edges_"
                f"{'with' if self.config['data']['predict_graph_with_test_edges'] else 'without'}-test-edges_"
                f"{self.config['data']['subsample_test_ratio']}-subsample.bin"
            )
            # Synthetic prediction split: 50 random node pairs, no negatives.
            # NOTE(review): "edge_neg" is a 1-D empty tensor; downstream
            # torch.cat with a (N, 2) tensor relies on torch's legacy
            # skip-empty behaviour -- confirm on the pinned torch version.
            split_edge["predict"] = {
                "edge": torch.randperm(100).view(-1, 2).float(),  # Random collaborations
                "edge_neg": torch.empty(0),
            }
            if not path.exists():
                edges, links = self.generate_edges_and_links(
                    split_edge=split_edge,
                    graph=simple_graph["predict"],
                    phase="predict",
                )
                graph_list, links = self.generate_list_of_graphs_and_links(
                    edges=edges,
                    links=links,
                    graph=simple_graph["predict"],
                )
                dgl.save_graphs(str(path), graph_list, {"links": links})
            self.dataset["predict"] = GraphDataSet(path=path)

    def sample_subgraph(self, target_nodes: torch.Tensor, graph: dgl.DGLHeteroGraph) -> dgl.DGLHeteroGraph:
        """
        Build the k-hop enclosing subgraph around one candidate edge.

        Parameters
        ----------
        target_nodes : tensor holding the two endpoint node ids (u, v)
        graph : the full graph to sample from
        Returns
        -------
        The node-induced subgraph with the target link removed (both
        directions) and DRNL labels attached in ndata["label"].
        """
        list_sample_nodes = [target_nodes]
        frontiers = target_nodes
        # Breadth-first expansion, config["data"]["hop"] levels deep.
        for _ in range(self.config["data"]["hop"]):
            frontiers = graph.out_edges(frontiers)[1]
            frontiers = torch.unique(frontiers)
            list_sample_nodes.append(frontiers)
        sample_nodes = torch.cat(list_sample_nodes)
        sample_nodes = torch.unique(sample_nodes)
        subgraph = dgl.node_subgraph(graph, sample_nodes)
        # Map the original endpoint ids to their ids inside the subgraph.
        u_id = int(torch.nonzero(subgraph.ndata[dgl.NID] == int(target_nodes[0]), as_tuple=False))
        v_id = int(torch.nonzero(subgraph.ndata[dgl.NID] == int(target_nodes[1]), as_tuple=False))
        # Remove the target link itself so the model cannot see the answer.
        if subgraph.has_edges_between(u_id, v_id):
            link_id = subgraph.edge_ids(u_id, v_id, return_uv=True)[2]
            subgraph.remove_edges(link_id)
        if subgraph.has_edges_between(v_id, u_id):
            link_id = subgraph.edge_ids(v_id, u_id, return_uv=True)[2]
            subgraph.remove_edges(link_id)
        subgraph.ndata["label"] = double_radius_node_labeling(subgraph, u_id, v_id)
        return subgraph

    @staticmethod
    def _collate(batch: List[Tuple[dgl.DGLHeteroGraph, torch.Tensor]]) -> Tuple[dgl.DGLHeteroGraph, torch.Tensor]:
        # Batch the subgraphs and stack the link labels.
        batch_graphs, batch_links = tuple(map(list, zip(*batch)))
        return dgl.batch(batch_graphs), torch.stack(batch_links)  # type: ignore # False Positive from MyPy
        # batch_links: List[torch.Tensor]

    def generate_list_of_graphs_and_links(
        self,
        edges: torch.Tensor,
        links: torch.Tensor,
        graph: dgl.DGLHeteroGraph,
    ) -> Tuple[List[dgl.DGLHeteroGraph], torch.Tensor]:
        """
        Materialise the enclosing subgraph for every candidate edge.

        Parameters
        ----------
        edges : (N, 2) tensor of candidate edges
        links : (N, 1) tensor of labels (1 = positive, 0 = negative)
        graph : graph to sample subgraphs from
        Returns
        -------
        List of subgraphs and the labels, in matching order.
        """
        sample_subgraph_partial = partial(self.sample_subgraph, graph=graph)
        edge_dataset = EdgeDataSet(
            edges=edges,
            links=links,
            transform=sample_subgraph_partial,
        )
        # A DataLoader is (ab)used here purely to parallelise the sampling.
        sampler = DataLoader(
            edge_dataset,
            batch_size=self.config["data"]["sampler"]["batch_size"],
            num_workers=self.config["data"]["sampler"]["num_workers"],
            pin_memory=self.config["data"]["sampler"]["pin_memory"],
            shuffle=False,
            collate_fn=self._collate,
        )
        subgraph_list = []
        links_list = []
        for subgraph, sub_links in tqdm(sampler, ncols=100):
            label_copy = deepcopy(sub_links)
            subgraph = dgl.unbatch(subgraph)
            del sub_links
            subgraph_list += subgraph
            links_list.append(label_copy)
        return subgraph_list, torch.cat(links_list)

    def generate_edges_and_links(
        self,
        split_edge: Dict[str, Dict[str, torch.Tensor]],
        graph: dgl.DGLHeteroGraph,
        phase: str,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Assemble the (subsampled, shuffled) positive and negative edges.

        Parameters
        ----------
        split_edge : OGB edge split dict, keyed by phase
        graph : graph used for negative sampling during training
        phase : "train", "valid", "test" or "predict"
        Returns
        -------
        edges : (N, 2) long tensor of candidate edges
        links : (N, 1) float tensor of labels (1 positive, 0 negative)
        """
        pos_edges = split_edge[phase]["edge"]
        if phase == "train":
            # Training negatives are sampled uniformly (one per positive);
            # the other phases ship pre-defined negatives.
            neg_sampler = Uniform(k=1)
            eids = graph.edge_ids(u=pos_edges[:, 0], v=pos_edges[:, 1])
            neg_edges = torch.stack(neg_sampler(graph, eids), dim=1)
        else:
            neg_edges = split_edge[phase]["edge_neg"]
        pos_edges = self.subsample_edges(
            edges=pos_edges,
            subsample_ratio=self.config["data"]["subsample_train_ratio"]
            if phase == "train"
            else self.config["data"]["subsample_test_ratio"],
        ).long()
        neg_edges = self.subsample_edges(
            edges=neg_edges,
            subsample_ratio=self.config["data"]["subsample_train_ratio"]
            if phase == "train"
            else self.config["data"]["subsample_test_ratio"],
        ).long()
        edges = torch.cat([pos_edges, neg_edges])
        links = torch.cat([torch.ones(pos_edges.shape[0], 1), torch.zeros(neg_edges.shape[0], 1)])
        edges, links = self.shuffle_edges_and_links(edges=edges, links=links)
        return edges, links

    @staticmethod
    def subsample_edges(edges: torch.Tensor, subsample_ratio: float) -> torch.Tensor:
        """Keep a random subsample_ratio fraction of the edges.

        :param edges: (N, 2) edge tensor
        :param subsample_ratio: fraction in [0, 1] of edges to keep
        :return: the subsampled edges, in random order
        """
        num_edges = edges.shape[0]
        perm = torch.randperm(num_edges)
        perm = perm[: int(subsample_ratio * num_edges)]
        edges = edges[perm]
        return edges

    @staticmethod
    def shuffle_edges_and_links(edges: torch.Tensor, links: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Apply one shared random permutation to edges and labels.

        Parameters
        ----------
        edges : (N, 2) edge tensor
        links : (N, 1) label tensor, row-aligned with edges
        Returns
        -------
        The shuffled (edges, links), still row-aligned.
        """
        perm = torch.randperm(edges.shape[0])
        edges = edges[perm]
        links = links[perm]
        return edges, links

    def train_dataloader(self) -> TRAIN_DATALOADERS:
        """DataLoader over cached training subgraphs (requires setup())."""
        assert self.dataset["train"] is not None
        return DataLoader(
            dataset=self.dataset["train"],
            batch_size=self.config["data"]["batch_size"],
            shuffle=True,
            num_workers=self.config["data"]["num_workers"],
            collate_fn=dgl.dataloading.GraphCollator().collate,
            pin_memory=self.config["data"]["pin_memory"],
            drop_last=False,
        )

    def val_dataloader(self) -> EVAL_DATALOADERS:
        """DataLoader over cached validation subgraphs (requires setup())."""
        assert self.dataset["valid"] is not None
        return DataLoader(
            dataset=self.dataset["valid"],
            batch_size=self.config["data"]["batch_size"],
            shuffle=False,
            num_workers=self.config["data"]["num_workers"],
            collate_fn=dgl.dataloading.GraphCollator().collate,
            pin_memory=self.config["data"]["pin_memory"],
            drop_last=False,
        )

    def test_dataloader(self) -> EVAL_DATALOADERS:
        """DataLoader over cached test subgraphs (requires setup())."""
        assert self.dataset["test"] is not None
        return DataLoader(
            dataset=self.dataset["test"],
            batch_size=self.config["data"]["batch_size"],
            shuffle=False,
            num_workers=self.config["data"]["num_workers"],
            collate_fn=dgl.dataloading.GraphCollator().collate,
            pin_memory=self.config["data"]["pin_memory"],
            drop_last=False,
        )

    def predict_dataloader(self) -> EVAL_DATALOADERS:
        """DataLoader over cached prediction subgraphs (requires setup())."""
        assert self.dataset["predict"] is not None
        return DataLoader(
            dataset=self.dataset["predict"],
            batch_size=self.config["data"]["batch_size"],
            shuffle=False,
            num_workers=self.config["data"]["num_workers"],
            collate_fn=dgl.dataloading.GraphCollator().collate,
            pin_memory=self.config["data"]["pin_memory"],
            drop_last=False,
        )
| {
"alphanum_fraction": 0.5671292331,
"author": null,
"avg_line_length": 35.1626506024,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "cbe51ed3f3ed0a701412c7cfa1ee4da1b1464f81",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-12-05T12:30:04.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-20T15:18:26.000Z",
"max_forks_repo_head_hexsha": "a33ee332fcd32b2bc5ee5a0337c2de7cc74258df",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "davidaderup/zero_to_hero",
"max_forks_repo_path": "src/zero_to_hero/data/collab.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a33ee332fcd32b2bc5ee5a0337c2de7cc74258df",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "davidaderup/zero_to_hero",
"max_issues_repo_path": "src/zero_to_hero/data/collab.py",
"max_line_length": 120,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a33ee332fcd32b2bc5ee5a0337c2de7cc74258df",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "davidaderup/zero_to_hero",
"max_stars_repo_path": "src/zero_to_hero/data/collab.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3995,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 17511
} |
\subsection{Optimal Experimental Design Considerations}
In this section we return to the nonlinear examples presented in the previous chapter and address some choices made in how the experiment was performed.
By revisiting the examples, we demonstrate that the decisions made regarding measurement equipment and/or location have an impact on the reduction of uncertainty and accuracy of the MUD point.
Furthermore, we show that the choices made in the experimental design of previous examples are made for reasons of convenience of exposition.
Changing these assumptions does not alter the viability of the MUD point as an alternative estimator for use in parameter identification problems.
We study the impact of more precise measurement devices on the convergence rates for the parameter estimates in Appendix \ref{ext:ode-example} for the problem in \ref{subsec:ode-example} of estimating the rate of exponential decay.
To complement these results, we show them alongside ones generated with equipment that measures at twice the temporal frequency.
In Section~\ref{sec:pde-oed-example}, we also highlight how an awareness of another geometric property of QoI maps---relating to their sensitivity with respect to $\param$---can help improve the accuracy of the MUD estimate.
By placing sensors in locations which exhibit greater sensitivity to the parameter for which the SIP is solved, experimenters can achieve a considerable improvement in the precision of estimating $\paramref$ with an equal number of measurements collected.
A similar complementary problem is solved where information about the sensitivity of measurement locations is used to inform improved placement of a hundred sensors.
In this example, we walk through the sorts of analyses a modeler might conduct in order to select an experimental design through simulation and show a significant improvement in the accuracy of the MUD point.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Elliptic PDE Example}\label{sec:pde-oed-example}
We make a slight modification to the Poisson problem from \ref{subsec:pde-example} to make it into a one-dimensional parameter identification problem.
This choice is primarily motivated by the goal of using visual aids to demonstrate slopes corresponding to different measurement locations.
We briefly summarize the experimental set-up again for the reader's convenience.
Consider the following Poisson problem defined on a unit domain $\Omega$:
\begin{equation}\label{eq:pde-equation-1d}
\begin{cases}
\hfill -\nabla \cdot \nabla u &= f \quad\text{on } \Omega \\
\hfill u &= 0 \quad\text{ on } \Gamma_T \cup \Gamma_B \\
\hfill \frac{\partial u}{\partial \mathbf{n}} &= g(x,\param) \quad\text{ on } \Gamma_L \\
\hfill \frac{\partial u}{\partial \mathbf{n}} &= 0 \quad\text{ on } \Gamma_R
\end{cases}
\end{equation}
where $(x_1, x_2) \in \Omega = (0,1)^2$, $\Gamma_T$ is the top, $\Gamma_B$ is the bottom, $\Gamma_L$ and $\Gamma_R$ left and right, respectively.
$\frac{\partial u}{\partial \mathbf{n}}$ denotes the outward normal direction.
We select $g$ as before in \ref{subsec:pde-example}, and show the response surface for $\paramref$ in the left of Figure~\ref{fig:pde-response}, with darker colors representing more negative values.
Now, $g$ will be parameterized by a scalar instead of a vector, representing the leading coefficient in the polynomial.
In other words, we presume knowledge of the structure of $g$ but not its exact values.
The initial density is again chosen to be uniform over the interval $\Lambda = (-4, 0)$, and $f$ is chosen to be $10\exp\{-\frac{(x_1-0.5)^2 + (x_2 - 0.5)^2}{0.02}\}$.
We demonstrate the impact of incorporating more measurements on the ability to estimate $\paramref$.
%This poses a problem for this particular experimental design since it will heavily rely on the way in which the sensor grid is indexed.
%One could place a regular grid of sensors in the interior of $\Omega$ to simulate a structured sensor array.
%However, observe that the response surface shown on the left panel of Figure~\ref{fig:pde-response} exhibit vertical symmetry about the line $x_2=0.5$ (as a result of our choice of $g$).
%For example, if the first half of indexed sensors corresponded to the bottom half of $\Omega$, the incorporation of the second half will be equivalent to having repeated observations.
%To avoid these problems, we instead simulate the sensors as being placed randomly (drawn from uniform distributions), in the interior so that index-dependence becomes irrelevant and probability theory ensures the lack of truly redundant measurement locations.
In \cite{Walsh}, the geometric quantity known as \emph{scaling} inherent to QoI maps is studied with respect to its impact on the precision of solutions to SIPs.
This property corresponds to the average local linear response of the QoI map with respect to changes in $\param$.
In short, if a QoI map (such as one induced by a single measurement), exhibits larger slopes on average over $\pspace$, then it has greater scaling and by implication, more predictive precision.
We demonstrate how an awareness of the QoI's scaling can inform the construction of a more optimal QoI map by way of better selecting the locations of measurement devices.
Here, the assessment of a map's average scaling is identified heuristically through visual inspection of slopes.
Such a graphical comparison of QoI maps can be done without prior knowledge of the scaling property and is not outside the scope of analysis that could be performed during initial investigation into an inverse problem.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Uninformed Sensor Placement}
First we show that using the sensor-placement strategy introduced in \ref{subsec:pde-example} results in many locations that provide little information to reduce uncertainty in the parameter space.
We consider a selection of $S=1000$ measurement locations in the interior of the response surface chosen by sampling a uniform density over the set $(0.05, 0.95)^2 \subset \Omega$.
We show only the first $100$ measurement locations in plots for visual clarity.
In the rightmost histogram of Figure~\ref{fig:pde-response}, we plot the data generated by each simulated sensor location, and note that many values are near zero as a result of being near boundaries or the right-side of $\Omega$.
\begin{figure}
\centering
\includegraphics[width=0.25\linewidth]{figures/pde/pde_reference_solution}
\includegraphics[width=0.25\linewidth]{figures/pde/pde_sensitivity_qoi}
\includegraphics[width=0.45\linewidth]{figures/pde/pde_qoi_response}
\caption{(Left): The function response surface for $u$ solving \eqref{eq:pde-equation-1d} with $S=100$ measurement locations highlighted in white.
The twenty most sensitive location markers are filled.
(Center): The derivative $\partial M_i / \partial \param$ is computed for the $100$ measurement locations and the distribution of the resulting collection of slopes is plotted.
(Right): The values of the response surface at the measurements are shown. The true parameter value $\paramref$ is highlighted with a vertical line, and the values of the response surface conditioned on $\paramref$ are used to form the histogram plotted vertically on the right. Many measurements are near zero.
}
\label{fig:pde-response}
\end{figure}
The measurement response as a function of $\param$ is plotted in the rightmost panel of Figure~\ref{fig:pde-response}, and suggests that the sensors each exhibit linear responses to changes in the parameter.
This observation can be used to visually identify that some measurements are more sensitive than others since the lines from certain sensors have steeper slopes than the majority of locations.
The majority of measurements exhibit almost no sensitivity to changes in $\param$, visually represented by the density of nearly horizontal lines (slopes of zero).
However, some of the sensors have steep slopes, which suggests higher sensitivity to changes in $\param$.
To quantify the variability in the slopes across different sensor locations, we use the smallest and largest sample values of $(\param, u(\param))$ to make a global linear estimate of each one's slope.
We plot the distribution associated with the collection of these slopes in the center histogram of \ref{fig:pde-response}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \vfill
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Informed Sensor Placement}
Instead of placing sensors throughout the square interior of $\Omega$ given by $(0.05, 0.95)^2$, we consider how the convergence results would compare if the subdomain for sensors was better selected.
In the left panel of Figure~\ref{fig:pde-response}, the most sensitive measurements are highlighted and appear near the left boundary.
Furthermore, the response surface exhibits horizontal symmetry, so we restrict locations to the bottom half of $\Omega$.
These two observations can inform a new bounding box for consideration of where measurements should be taken.
We perform the same experiment for sensors placed in $(0.05, 0.25)\times(0.05, 0.5)$ (measurement locations drawn from a uniform distribution over this region), and refer to this as the \emph{alternative} experimental design.
The first $100$ of the $1000$ sensor locations sampled is shown in the left panel of \ref{fig:pde-alt-response} and we remark that the most sensitive ones (highlighted) cluster near the center of the left boundary, where the response surface is most negative.
\begin{figure}
\centering
\includegraphics[width=0.25\linewidth]{figures/pde/pde-alt_reference_solution}
\includegraphics[width=0.25\linewidth]{figures/pde/pde-alt_sensitivity_qoi}
\includegraphics[width=0.45\linewidth]{figures/pde/pde-alt_qoi_response}
\caption{The same panels as in Figure~\ref{fig:pde-response} but for the placement of sensors informed by the observations about sensitivity incorporated into the experimental design.
The alternative placement eliminates redundancy induced by the symmetry of the response surface, and is concentrated in the regions which exhibit more sensitivity to changes in $\param$.
As a result of these choices, we observe fewer measurements near zero (bottom histogram), and slopes with larger magnitude (top).
}
\label{fig:pde-alt-response}
\end{figure}
For this alternative design, we show the sensitivity of sensors in the center of \ref{fig:pde-alt-response} and note that there are fewer sensors which exhibit low sensitivity to changes in $\paramref$ in contrast to \ref{fig:pde-response}.
The slopes are again shown in the center of the figure and exhibit a bimodal distribution with a larger portion of the measurements having slopes with magnitude 4-6 times greater than the mode in the center of \ref{fig:pde-response}.
There are also fewer measurements that take values near zero, as shown in the rightmost panel of the figures.
The original design exhibited a strong decay in its distribution of measurement values, while the alternative design results in a much more symmetric distribution.
% \vfill
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Comparison of SIP Solutions with Different QoI Maps}
We are interested in knowing how the uncertainty around the parameter estimate (the MUD point) changes as we incorporate more (noisy) data.
To generate convergence plots, we solve the problem repeatedly for $S = 10, 50, 100, 250, 500, \text{ and } 1000$ and take the mean and variance of the twenty trials for each value of $S$.
Consider the convergence plots in Figure~\ref{fig:pde-convergence-obs}, which demonstrates the impact of increasing $S$ on our ability to resolve $\paramref$.
\begin{figure}
\centering
\includegraphics[width=0.475\linewidth]{figures/pde/pde_convergence_mud_obs_mean}
\includegraphics[width=0.475\linewidth]{figures/pde/pde_convergence_mud_obs_var}
\caption{Convergence of the MUD point (given $N=1E3$ model evaluations) for increasing numbers of observations for randomly placed sensors.
We observe similar rates of convergence for both arrangements of measurement locations, with a marked improvement in both accuracy and precision when an informed placement is used.
}
\label{fig:pde-convergence-obs}
\end{figure}
We show the mean absolute error in the left half of Figure~\ref{fig:pde-convergence-obs} and remark that, with the alternative design, two decimal places of accuracy can be achieved with approximately $250$ measurements instead of the $1000$ required by the original design.
The convergence results for the original experimental design demonstrate that even randomly placed sensors in the interior of $\Omega$ are suitable for parameter estimation.
However, when we considered sensors that are placed with knowledge about the physical system being studied, more information is learned from the measurement equipment by placing sensors in different locations.
Using the alternative experimental design, we see a reduction of uncertainty in the panels of Figure~\ref{fig:pde-convergence-obs}, represented by the persistent vertical displacement between the regression lines for convergence.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\FloatBarrier
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% \subsection{Concluding Remarks}
% These examples demonstrate that Data--Consistent Inversion can be used for parameter identification as a viable alternative to existing methods.
% Incorporating available observations as we have done in the previous example leaves the output space scalar-valued.
% As the number of parameters grows, this output dimension resulting from such an approach effectively stays fixed.
% These situations are particularly when the DCI approach becomes advantageous over other methods, as it is less sensitive to mistakes in modeling assumptions than other methods for solving inverse problems as we saw with the linear examples in \ref{subsec:linear_examples} \ref{sec:high-dim-linear-example}.
% One can incorporate a much wider variety of prior beliefs about the relative likelihoods of parameters before data is collected without compromising predictive error.
% The DCI approach guarantees that the functional defined (for us, the weighted mean error) will remain accurate in spite of any encoded assumptions that are somehow at odds with data that is subsequently collected.
| {
"alphanum_fraction": 0.7482577819,
"author": null,
"avg_line_length": 98.477124183,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "b70c6ee2ff01f471dee4f06e65c3da41ac2a62e9",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2906b10f94960c3e75bdb48e5b8b583f59b9441e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mathematicalmichael/thesis",
"max_forks_repo_path": "extensions/mud_oed.tex",
"max_issues_count": 59,
"max_issues_repo_head_hexsha": "2906b10f94960c3e75bdb48e5b8b583f59b9441e",
"max_issues_repo_issues_event_max_datetime": "2021-11-24T17:52:57.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-12-27T23:15:05.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mathematicalmichael/thesis",
"max_issues_repo_path": "extensions/mud_oed.tex",
"max_line_length": 311,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "2906b10f94960c3e75bdb48e5b8b583f59b9441e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mathematicalmichael/thesis",
"max_stars_repo_path": "extensions/mud_oed.tex",
"max_stars_repo_stars_event_max_datetime": "2020-12-28T20:34:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-24T08:05:49.000Z",
"num_tokens": 3088,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 15067
} |
import networkx as nx
import time
from random import random
class ModularityRCommunityDiscovery(object):
    """Local community discovery that greedily maximizes the local
    modularity measure R, growing a community outward from a seed node.

    State kept during a search:
      graph         -- the graph under analysis (self-loops are stripped
                       on construction, mutating the graph in place).
      starting_node -- the current seed, or None before a search begins.
      community     -- accepted member nodes, in join order.
      boundary      -- community nodes believed to have a neighbor outside
                       the community.  NOTE(review): nodes are only ever
                       added to this set, never removed when their last
                       outside neighbor joins -- confirm against the paper.
      shell         -- candidate nodes adjacent to the community.
    """

    # Expansion stops once the best candidate improves R by less than this.
    minimum_improvement = 0.000001

    def __init__(self, graph):
        self.graph = graph
        self.starting_node = None
        self.community = []
        self.boundary = set()
        self.shell = set()
        self.remove_self_loops()

    def reset(self):
        """Clear all per-search state so another seed can be explored."""
        self.community.clear()
        self.boundary.clear()
        self.shell.clear()

    def remove_self_loops(self):
        """Drop self-loop edges; they carry no information for R."""
        for vertex in self.graph.nodes():
            if self.graph.has_edge(vertex, vertex):
                self.graph.remove_edge(vertex, vertex)

    def set_start_node(self, start_node):
        """Seed the search at ``start_node``; abort the process if absent."""
        if start_node not in self.graph.nodes():
            print("Invalid starting node! Try with another one.")
            exit(-1)
        self.starting_node = start_node
        self.community.append(start_node)
        self.boundary.add(start_node)
        self.shell = set(self.graph.neighbors(start_node))

    def update_sets_when_node_joins(self, node, change_boundary=False):
        """Record that ``node`` joined, refreshing shell (and boundary)."""
        self.community.append(node)
        if change_boundary:
            self.update_boundary_when_node_joins(node)
        self.update_shell_when_node_joins(node)

    def update_shell_when_node_joins(self, new_node):
        """Extend the shell with the newcomer's neighbors, minus members."""
        self.shell.update(self.graph.neighbors(new_node))
        self.shell.difference_update(self.community)

    def update_boundary_when_node_joins(self, new_node):
        """The newcomer is a boundary node iff some neighbor is outside."""
        if any(neighbor not in self.community
               for neighbor in self.graph.neighbors(new_node)):
            self.boundary.add(new_node)

    def find_best_next_node(self, improvements):
        """Return the candidate with the largest improvement.

        Candidates are visited in a randomly shuffled order (sorting on a
        random key) so that ties are broken at random.
        """
        best_node, best_gain = None, -float("inf")
        for node, gain in sorted(improvements.items(), key=lambda _: random()):
            if gain > best_gain:
                best_node, best_gain = node, gain
        return best_node

    def community_search(self, start_node):
        """Greedily grow a community around ``start_node``.

        Returns the sorted list of member nodes once no shell candidate
        improves R by at least ``minimum_improvement`` (or the whole
        graph has been absorbed).
        """
        self.set_start_node(start_node)
        modularity_r = 0.0
        T = self.graph.degree[start_node]
        while len(self.community) < self.graph.number_of_nodes() and len(self.shell) > 0:
            # candidate -> improvement of R / change of T if it joins
            delta_r = {}
            delta_T = {}
            for candidate in self.shell:
                delta_r[candidate], delta_T[candidate] = self.compute_modularity(
                    (modularity_r, T), candidate
                )
            chosen = self.find_best_next_node(delta_r)
            if delta_r[chosen] < ModularityRCommunityDiscovery.minimum_improvement:
                break
            modularity_r += delta_r[chosen]
            T += delta_T[chosen]
            self.update_sets_when_node_joins(chosen, change_boundary=True)
        # Sorting is purely cosmetic; drop it to boost performance.
        return sorted(self.community)

    def compute_modularity(self, auxiliary_info, candidate_node):
        """Evaluate (delta R, delta T) if ``candidate_node`` were to join.

        ``auxiliary_info`` is the pair (current R, current T).  Using the
        paper's notation: x counts the candidate's edges into the current
        boundary, y its edges elsewhere, and z the boundary edges that
        vanish because a boundary neighbor would leave the boundary.
        """
        R, T = auxiliary_info
        into_boundary, elsewhere = 0, 0
        for neighbor in self.graph.neighbors(candidate_node):
            if neighbor in self.boundary:
                into_boundary += 1
            else:
                elsewhere += 1
        vanishing = 0
        for neighbor in self.graph.neighbors(candidate_node):
            if neighbor in self.boundary and self.should_leave_boundary(neighbor, candidate_node):
                for node in self.graph.neighbors(neighbor):
                    if node in self.community and node not in self.boundary:
                        vanishing += 1
        delta_r = float(into_boundary - R * elsewhere - vanishing * (1 - R)) / float(T - vanishing + elsewhere)
        return delta_r, -vanishing + elsewhere

    def should_leave_boundary(self, possibly_leaving_node, neighbor_node):
        """True iff, once ``neighbor_node`` joins the community,
        ``possibly_leaving_node`` has no neighbor left outside it."""
        remaining = set(self.graph.neighbors(possibly_leaving_node))
        remaining.discard(neighbor_node)
        return all(neighbor in self.community for neighbor in remaining)
| {
"alphanum_fraction": 0.5976370932,
"author": null,
"avg_line_length": 35.3228346457,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "97e04a8b6869e3aac56a72d0d3e99e0693f4fb5c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 70,
"max_forks_repo_forks_event_max_datetime": "2022-03-27T12:58:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-02-15T19:04:29.000Z",
"max_forks_repo_head_hexsha": "590e145429cda1db4d3671c994c502bedd77f108",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "xing-lab-pitt/cdlib",
"max_forks_repo_path": "cdlib/algorithms/internal/modularity_r.py",
"max_issues_count": 130,
"max_issues_repo_head_hexsha": "590e145429cda1db4d3671c994c502bedd77f108",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T10:58:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-02-10T19:35:55.000Z",
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "xing-lab-pitt/cdlib",
"max_issues_repo_path": "cdlib/algorithms/internal/modularity_r.py",
"max_line_length": 108,
"max_stars_count": 248,
"max_stars_repo_head_hexsha": "590e145429cda1db4d3671c994c502bedd77f108",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "xing-lab-pitt/cdlib",
"max_stars_repo_path": "cdlib/algorithms/internal/modularity_r.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T04:57:20.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-17T05:31:22.000Z",
"num_tokens": 924,
"path": null,
"reason": "import networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 4486
} |
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
-- NOTE(review): this file is an automatic Lean 3 -> Lean 4 port; definition
-- bodies elided by the porting tool are left as `sorry` placeholders.
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.logic
import Mathlib.Lean3Lib.init.data.nat.basic
import Mathlib.Lean3Lib.init.data.bool.basic
import Mathlib.Lean3Lib.init.propext
universes u u_1 v w
namespace Mathlib
-- The default inhabitant of `List α` is the empty list.
protected instance list.inhabited (α : Type u) : Inhabited (List α) := { default := [] }
namespace list
-- Equality of lists is decidable whenever equality of the elements is.
protected def has_dec_eq {α : Type u} [s : DecidableEq α] : DecidableEq (List α) := sorry
protected instance decidable_eq {α : Type u} [DecidableEq α] : DecidableEq (List α) :=
  list.has_dec_eq
@[simp] protected def append {α : Type u} : List α → List α → List α := sorry
protected instance has_append {α : Type u} : Append (List α) := { append := list.append }
-- Membership predicate `a ∈ l`, with decidability when `α` has decidable equality.
protected def mem {α : Type u} : α → List α → Prop := sorry
protected instance has_mem {α : Type u} : has_mem α (List α) := has_mem.mk list.mem
protected instance decidable_mem {α : Type u} [DecidableEq α] (a : α) (l : List α) :
    Decidable (a ∈ l) :=
  sorry
protected instance has_emptyc {α : Type u} : has_emptyc (List α) := has_emptyc.mk []
protected def erase {α : Type u_1} [DecidableEq α] : List α → α → List α := sorry
protected def bag_inter {α : Type u_1} [DecidableEq α] : List α → List α → List α := sorry
protected def diff {α : Type u_1} [DecidableEq α] : List α → List α → List α := sorry
-- Basic observers: length, emptiness test, safe and checked indexing.
@[simp] def length {α : Type u} : List α → ℕ := sorry
def empty {α : Type u} : List α → Bool := sorry
@[simp] def nth {α : Type u} : List α → ℕ → Option α := sorry
@[simp] def nth_le {α : Type u} (l : List α) (n : ℕ) : n < length l → α := sorry
@[simp] def head {α : Type u} [Inhabited α] : List α → α := sorry
@[simp] def tail {α : Type u} : List α → List α := sorry
-- `reverse_core l acc` is tail-recursive reversal onto the accumulator `acc`.
def reverse_core {α : Type u} : List α → List α → List α := sorry
def reverse {α : Type u} : List α → List α := fun (l : List α) => reverse_core l []
@[simp] def map {α : Type u} {β : Type v} (f : α → β) : List α → List β := sorry
@[simp] def map₂ {α : Type u} {β : Type v} {γ : Type w} (f : α → β → γ) :
    List α → List β → List γ :=
  sorry
def map_with_index_core {α : Type u} {β : Type v} (f : ℕ → α → β) : ℕ → List α → List β := sorry
/-- Given a function `f : ℕ → α → β` and `as : list α`, `as = [a₀, a₁, ...]`, returns the list
`[f 0 a₀, f 1 a₁, ...]`. -/
def map_with_index {α : Type u} {β : Type v} (f : ℕ → α → β) (as : List α) : List β :=
  map_with_index_core f 0 as
def join {α : Type u} : List (List α) → List α := sorry
def filter_map {α : Type u} {β : Type v} (f : α → Option β) : List α → List β := sorry
def filter {α : Type u} (p : α → Prop) [decidable_pred p] : List α → List α := sorry
def partition {α : Type u} (p : α → Prop) [decidable_pred p] : List α → List α × List α := sorry
def drop_while {α : Type u} (p : α → Prop) [decidable_pred p] : List α → List α := sorry
/-- `after p xs` is the suffix of `xs` after the first element that satisfies
`p`, not including that element.
```lean
after (eq 1) [0, 1, 2, 3] = [2, 3]
drop_while (not ∘ eq 1) [0, 1, 2, 3] = [1, 2, 3]
```
-/
def after {α : Type u} (p : α → Prop) [decidable_pred p] : List α → List α := sorry
def span {α : Type u} (p : α → Prop) [decidable_pred p] : List α → List α × List α := sorry
def find_index {α : Type u} (p : α → Prop) [decidable_pred p] : List α → ℕ := sorry
def index_of {α : Type u} [DecidableEq α] (a : α) : List α → ℕ := find_index (Eq a)
def remove_all {α : Type u} [DecidableEq α] (xs : List α) (ys : List α) : List α :=
  filter (fun (_x : α) => ¬_x ∈ ys) xs
def update_nth {α : Type u} : List α → ℕ → α → List α := sorry
def remove_nth {α : Type u} : List α → ℕ → List α := sorry
@[simp] def drop {α : Type u} : ℕ → List α → List α := sorry
@[simp] def take {α : Type u} : ℕ → List α → List α := sorry
@[simp] def foldl {α : Type u} {β : Type v} (f : α → β → α) : α → List β → α := sorry
@[simp] def foldr {α : Type u} {β : Type v} (f : α → β → β) (b : β) : List α → β := sorry
-- Boolean quantifiers over a list, defined via `foldr`.
def any {α : Type u} (l : List α) (p : α → Bool) : Bool :=
  foldr (fun (a : α) (r : Bool) => p a || r) false l
def all {α : Type u} (l : List α) (p : α → Bool) : Bool :=
  foldr (fun (a : α) (r : Bool) => p a && r) tt l
def bor (l : List Bool) : Bool := any l id
def band (l : List Bool) : Bool := all l id
def zip_with {α : Type u} {β : Type v} {γ : Type w} (f : α → β → γ) : List α → List β → List γ :=
  sorry
def zip {α : Type u} {β : Type v} : List α → List β → List (α × β) := zip_with Prod.mk
def unzip {α : Type u} {β : Type v} : List (α × β) → List α × List β := sorry
-- Set-like operations treating a list as a collection without duplicates.
protected def insert {α : Type u} [DecidableEq α] (a : α) (l : List α) : List α :=
  ite (a ∈ l) l (a :: l)
protected instance has_insert {α : Type u} [DecidableEq α] : has_insert α (List α) :=
  has_insert.mk list.insert
protected instance has_singleton {α : Type u} : has_singleton α (List α) :=
  has_singleton.mk fun (x : α) => [x]
protected instance is_lawful_singleton {α : Type u} [DecidableEq α] :
    is_lawful_singleton α (List α) :=
  is_lawful_singleton.mk
    fun (x : α) => (fun (this : ite (x ∈ []) [] [x] = [x]) => this) (if_neg not_false)
protected def union {α : Type u} [DecidableEq α] (l₁ : List α) (l₂ : List α) : List α :=
  foldr insert l₂ l₁
protected instance has_union {α : Type u} [DecidableEq α] : has_union (List α) :=
  has_union.mk list.union
protected def inter {α : Type u} [DecidableEq α] (l₁ : List α) (l₂ : List α) : List α :=
  filter (fun (_x : α) => _x ∈ l₂) l₁
protected instance has_inter {α : Type u} [DecidableEq α] : has_inter (List α) :=
  has_inter.mk list.inter
@[simp] def repeat {α : Type u} (a : α) : ℕ → List α := sorry
-- `range n` enumerates `[0, ..., n-1]`; `iota n` the paper-order variant.
def range_core : ℕ → List ℕ → List ℕ := sorry
def range (n : ℕ) : List ℕ := range_core n []
def iota : ℕ → List ℕ := sorry
def enum_from {α : Type u} : ℕ → List α → List (ℕ × α) := sorry
def enum {α : Type u} : List α → List (ℕ × α) := enum_from 0
@[simp] def last {α : Type u} (l : List α) : l ≠ [] → α := sorry
def ilast {α : Type u} [Inhabited α] : List α → α := sorry
def init {α : Type u} : List α → List α := sorry
def intersperse {α : Type u} (sep : α) : List α → List α := sorry
def intercalate {α : Type u} (sep : List α) (xs : List (List α)) : List α :=
  join (intersperse sep xs)
-- Monad-like operations on lists.
protected def bind {α : Type u} {β : Type v} (a : List α) (b : α → List β) : List β :=
  join (map b a)
protected def ret {α : Type u} (a : α) : List α := [a]
-- Lexicographic order on lists, induced by an order on the elements.
protected def lt {α : Type u} [HasLess α] : List α → List α → Prop := sorry
protected instance has_lt {α : Type u} [HasLess α] : HasLess (List α) := { Less := list.lt }
protected instance has_decidable_lt {α : Type u} [HasLess α] [h : DecidableRel Less] (l₁ : List α)
    (l₂ : List α) : Decidable (l₁ < l₂) :=
  sorry
protected def le {α : Type u} [HasLess α] (a : List α) (b : List α) := ¬b < a
protected instance has_le {α : Type u} [HasLess α] : HasLessEq (List α) := { LessEq := list.le }
protected instance has_decidable_le {α : Type u} [HasLess α] [h : DecidableRel Less] (l₁ : List α)
    (l₂ : List α) : Decidable (l₁ ≤ l₂) :=
  not.decidable
theorem le_eq_not_gt {α : Type u} [HasLess α] (l₁ : List α) (l₂ : List α) : l₁ ≤ l₂ = (¬l₂ < l₁) :=
  rfl
theorem lt_eq_not_ge {α : Type u} [HasLess α] [DecidableRel Less] (l₁ : List α) (l₂ : List α) :
    l₁ < l₂ = (¬l₂ ≤ l₁) :=
  (fun (this : l₁ < l₂ = (¬¬l₁ < l₂)) => this)
    (Eq.symm (propext (decidable.not_not_iff (l₁ < l₂))) ▸ rfl)
/-- `is_prefix_of l₁ l₂` returns `tt` iff `l₁` is a prefix of `l₂`. -/
def is_prefix_of {α : Type u} [DecidableEq α] : List α → List α → Bool := sorry
/-- `is_suffix_of l₁ l₂` returns `tt` iff `l₁` is a suffix of `l₂`. -/
def is_suffix_of {α : Type u} [DecidableEq α] (l₁ : List α) (l₂ : List α) : Bool :=
  is_prefix_of (reverse l₁) (reverse l₂)
end list
namespace bin_tree
-- Flatten a binary tree into a list; `to_list_aux` is defined elsewhere.
def to_list {α : Type u} (t : bin_tree α) : List α := to_list_aux t []
end Mathlib | {
"alphanum_fraction": null,
"author": "AurelienSaue",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/AurelienSaue-Mathlib4_auto/Mathlib4_auto-590df64109b08190abe22358fabc3eae000943f2/Mathlib/Lean3Lib/init/data/list/basic_auto.lean",
"reason": null,
"repo": "Mathlib4_auto",
"save_path": "github-repos/lean/AurelienSaue-Mathlib4_auto",
"sha": "590df64109b08190abe22358fabc3eae000943f2",
"size": null
} |
[STATEMENT]
lemma image_Un_conv: "f ` (\<Union>p\<in>dom \<Gamma>. \<Union>Z. {x p Z}) = (\<Union>p\<in>dom \<Gamma>. \<Union>Z. {f (x p Z)})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f ` (\<Union>p\<in>dom \<Gamma>. \<Union>Z. {x p Z}) = (\<Union>p\<in>dom \<Gamma>. \<Union>Z. {f (x p Z)})
[PROOF STEP]
by (auto iff: not_None_eq) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Simpl_HoareTotalProps",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 154,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
%%*****************************************************************
%% NTdirfun: compute (dX,dZ), given dy, for the NT direction.
%%*****************************************************************
%% SDPT3: version 4.0
%% Copyright (c) 1997 by
%% Kim-Chuan Toh, Michael J. Todd, Reha H. Tutuncu
%% Last Modified: 16 Sep 2004
%%*****************************************************************
%% Inputs:
%%   blk    : block-structure cell array; blk{p,1} is the block type
%%            ('l' linear, 'q' second-order cone, 's' semidefinite,
%%            'u' unrestricted -- SDPT3's standard block labels).
%%   At     : constraint data, one cell row per block.
%%   par    : NT scaling data (fields dd, ee, W, permA, isspAy used here).
%%   Rd     : dual residual, per block.
%%   EinvRc : E^{-1} applied to the complementarity residual, per block.
%%   xx     : solution of the Schur-complement linear system; the first m
%%            entries are dy, the tail holds the 'u'-block parts of dX.
%%   m      : number of equality constraints (length of dy).
%% Outputs: cell arrays dX, dZ and the vector dy of the NT direction.
%% Side effect: sets the global flag solve_ok = 0 on a non-finite xx.
function [dX,dy,dZ] = NTdirfun(blk,At,par,Rd,EinvRc,xx,m);
global solve_ok
dX = cell(size(blk,1),1); dZ = cell(size(blk,1),1); dy = [];
%% Abort (and flag failure) if the linear solver returned NaN/Inf.
if (any(isnan(xx)) | any(isinf(xx)))
solve_ok = 0;
fprintf('\n linsysolve: solution contains NaN or inf.');
return;
end
%%
dy = xx(1:m);
count = m;
%%
%% Recover dZ = Rd - At*dy blockwise, then back out dX from the
%% (scaled) complementarity equation for each block type.
for p=1:size(blk,1)
pblk = blk(p,:);
if strcmp(pblk{1},'l')
%%dZ{p} = Rd{p} - At{p}*dy;
dZ(p) = ops(Rd(p),'-',Atyfun(pblk,At(p,:),[],[],dy));
tmp = par.dd{p}.*dZ{p};
dX{p} = EinvRc{p} - tmp;
elseif strcmp(pblk{1},'q')
%% second-order cone block: diagonal part dd plus rank-one ee terms.
%%dZ{p} = Rd{p} - At{p}*dy;
dZ(p) = ops(Rd(p),'-',Atyfun(pblk,At(p,:),[],[],dy));
tmp = par.dd{p}.*dZ{p} + qops(pblk,qops(pblk,dZ{p},par.ee{p},1),par.ee{p},3);
dX{p} = EinvRc{p} - tmp;
elseif strcmp(pblk{1},'s')
%% semidefinite block: congruence with the NT scaling matrix W.
%%dZ{p} = Rd{p} - smat(pblk,At{p}*dy(par.permA(p,:)),par.isspAy(p));
dZ(p) = ops(Rd(p),'-',Atyfun(pblk,At(p,:),par.permA(p,:),par.isspAy(p),dy));
tmp = Prod3(pblk,par.W{p},dZ{p},par.W{p},1);
dX{p} = EinvRc{p}-tmp;
elseif strcmp(pblk{1},'u');
%% unrestricted block: dX was solved for directly in the tail of xx.
n = sum(pblk{2});
dZ{p} = zeros(n,1);
dX{p} = xx(count+[1:n]);
count = count + n;
end
end
%%*******************************************************************
| {
"alphanum_fraction": null,
"author": "yu-jiang",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/yu-jiang-radpbook/radpbook-88b9fa7d0a541099cdd1ac29383c89e087d1d895/tools/SDPT3-4.0/SDPT3-4.0/Solver/NTdirfun.m",
"reason": null,
"repo": "radpbook",
"save_path": "github-repos/MATLAB/yu-jiang-radpbook",
"sha": "88b9fa7d0a541099cdd1ac29383c89e087d1d895",
"size": null
} |
# -*- coding: utf-8 -*-
"""
@Project: cocopulas
@File: frank.py
@Author: Lou Xiayin
@Date: 2020/4/21
@Purpose:
@Description:
"""
from typing import Union
import numpy as np
from cocopulas.utils import split_matrix
from cocopulas.archimedean.base import ArchimedeanBaseCopula, ArchimedeanTypes
from cocopulas.core.types import Array
class Frank(ArchimedeanBaseCopula):
    """Bivariate Frank copula.

    An Archimedean copula with generator
    ``phi(t) = -log((exp(-a*t) - 1) / (exp(-a) - 1))``, where the
    dependence parameter ``alpha`` ranges over the whole real line
    excluding zero (the independence limit).
    """

    copula_name = ArchimedeanTypes.FRANK
    alpha_intervals = [float("-inf"), float("inf")]
    # alpha = 0 is excluded: the generator degenerates there.
    alpha_invalids = [0]

    def cumulative_distribution_function(self, x: Array) -> Union[float, np.ndarray]:
        """C(u,v) = -(1/a) * log(1 + (e^{-au}-1)(e^{-av}-1)/(e^{-a}-1))."""
        self.check_fit()
        u, v = split_matrix(x)
        theta = self.alpha
        numerator = np.multiply(np.exp(-theta * u) - 1, np.exp(-theta * v) - 1)
        denominator = np.exp(-theta) - 1
        return -np.divide(1, theta) * np.log(1 + np.divide(numerator, denominator))

    def probability_density_function(self, x: Array) -> Union[float, np.ndarray]:
        """Density c(u,v) = a(1-e^{-a}) e^{-a(u+v)} / D^2, where
        D = e^{-a} - 1 + (e^{-au}-1)(e^{-av}-1)."""
        self.check_fit()
        u, v = split_matrix(x)
        theta = self.alpha
        numerator = theta * (1 - np.exp(-theta)) * np.exp(-theta * (u + v))
        base = np.exp(-theta) - 1 + (np.exp(-theta * u) - 1) * (np.exp(-theta * v) - 1)
        return np.divide(numerator, np.power(base, 2))

    def generator(self, x: Array) -> Union[float, np.ndarray]:
        """Archimedean generator phi(x) of the Frank family."""
        theta = self.alpha
        return -np.log(np.divide(np.exp(-theta * x) - 1, np.exp(-theta) - 1))
"alphanum_fraction": 0.5956043956,
"author": null,
"avg_line_length": 31.7441860465,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "103c4877bb5620565100e4e1da932751c736c8e5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a1ba5e2e2abc8244b2901a6aced28b3b36e7075f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Vezarachan/cocopulas",
"max_forks_repo_path": "cocopulas/archimedean/frank.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a1ba5e2e2abc8244b2901a6aced28b3b36e7075f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Vezarachan/cocopulas",
"max_issues_repo_path": "cocopulas/archimedean/frank.py",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a1ba5e2e2abc8244b2901a6aced28b3b36e7075f",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Vezarachan/cocopulas",
"max_stars_repo_path": "cocopulas/archimedean/frank.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 416,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1365
} |
import random
import numpy as np
from pathlib import Path
import pandas as pd
from utils.rmat import rmat_to_file
from utils.probability import beta_rvs_shifted, beta_rvs_discrete_shifted
def generate_result_dataset(
        from_file = True,
        custom_weights = (1,) * 8,
        param_file = "../baseline_dataset/parameters.csv",
        name = "result",
        dataset_folder = '../resulting_dataset',
        dataset_size = 10000,
        edges_between = (1000,1000000),
        multiprocess = False):
    """Sample R-MAT parameters and generate a dataset of random graphs.

    Parameters
    ----------
    from_file : bool
        When True, read the Beta-distribution parameters from the last row
        of ``param_file`` whose ``name`` column matches ``name``; otherwise
        use ``custom_weights``.
    custom_weights : sequence of 8 floats
        (alfa_a, beta_a, alfa_b, beta_b, alfa_c, beta_c, alfa_N, beta_N),
        used when ``from_file`` is False.  The default is a tuple rather
        than a list so it is immutable (mutable-default pitfall).
    param_file : str
        CSV file with previously fitted parameters.
    name : str
        Row selector within ``param_file``.
    dataset_folder : str
        Output root; a ``graphs`` subfolder is created if needed.
    dataset_size : int
        Number of graphs to generate.
    edges_between : tuple of (int, int)
        Inclusive bounds for the edge count E of each graph.
    multiprocess : bool
        When True, generate the graphs in parallel via pebble, with a
        300-second timeout per graph.
    """
    if from_file:
        df = pd.read_csv(param_file)
        # Use the most recent (last) fit recorded under this name.
        params = df[df["name"] == name].iloc[-1][[
            "alfa_a", "beta_a", "alfa_b", "beta_b", "alfa_c", "beta_c", "alfa_N", "beta_N"
        ]]
    else:
        params = custom_weights
    print(params)
    alfa_a, beta_a, alfa_b, beta_b, alfa_c, beta_c, alfa_N, beta_N = params
    Path(dataset_folder, 'graphs').mkdir(parents=True, exist_ok=True)
    parameters = []
    for i in range(dataset_size):
        # Draw the edge count, then a node count N with N >= sqrt(2E) so the
        # (simple) graph can actually host E edges.
        E = random.randint(edges_between[0], edges_between[1])
        n_0 = np.floor(np.sqrt(E * 2))
        N = beta_rvs_discrete_shifted(alfa_N, beta_N, n_0, E + 1)
        # R-MAT quadrant probabilities with a + b + c + d = 1; the shifted
        # Beta supports keep a >= b >= d and a >= c >= d by construction.
        a = beta_rvs_shifted(alfa_a, beta_a, 0.25, 1)
        b = beta_rvs_shifted(alfa_b, beta_b, (1 - a) / 3, min(a, 1 - a))
        c = beta_rvs_shifted(alfa_c, beta_c, (1 - a - b) / 2, min(b, 1 - a - b))
        d = 1 - a - b - c
        parameters.append({
            "i": i, "N": N, "E": E,
            "a": a, "b": b, "c": c, "d": d
        })
    if multiprocess:
        from pebble import ProcessPool
        from utils.multiprocess import pebble_timeout_callback
        with ProcessPool() as pool:
            for param in parameters:
                future = pool.schedule(rmat_to_file,
                    args=(param['N'], param['E'], param['a'], param['b'], param['c'], param['d'], dataset_folder, param['i']),
                    timeout=300)
                future.add_done_callback(pebble_timeout_callback)
    else:
        for param in parameters:
            rmat_to_file(param['N'], param['E'], param['a'], param['b'], param['c'], param['d'], dataset_folder, param['i'])
"alphanum_fraction": 0.5929957407,
"author": null,
"avg_line_length": 33.015625,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b703c7631d5eb919b7de63c26505135bbe28adde",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e36b48fab57baee5d2d30d068ccd32776d6efe37",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "BNN-UPC/graphlaxy",
"max_forks_repo_path": "processes/result_dataset.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e36b48fab57baee5d2d30d068ccd32776d6efe37",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "BNN-UPC/graphlaxy",
"max_issues_repo_path": "processes/result_dataset.py",
"max_line_length": 121,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e36b48fab57baee5d2d30d068ccd32776d6efe37",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "BNN-UPC/graphlaxy",
"max_stars_repo_path": "processes/result_dataset.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 597,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2113
} |
from sympy import Matrix, symbols
from sympy.plotting import plot3d
# Build a symbolic 2-vector x = (a, b) and take A as the 2x2 identity matrix.
a, b = symbols('a b')
x = Matrix([a, b])
A = Matrix([[1, 0], [0, 1]])
# The identity matrix is positive (semi)definite.
A.is_positive_definite
# True
A.is_positive_semidefinite
# True
# Plot the quadratic form x^T A x = a^2 + b^2 over [-1, 1]^2; for a
# positive definite A this surface is an upward paraboloid.
p = plot3d((x.T*A*x)[0, 0], (a, -1, 1), (b, -1, 1))
| {
"alphanum_fraction": 0.62890625,
"author": null,
"avg_line_length": 19.6923076923,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f095d0bdbb969ade70683db98a8a8a3692c6d748",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 38,
"max_forks_repo_forks_event_max_datetime": "2021-12-02T13:19:43.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-08T18:48:27.000Z",
"max_forks_repo_head_hexsha": "ec1b7285b5fc5b3ba2eafa32470066122ae84b52",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "willtryagain/sympy_doc",
"max_forks_repo_path": "dev/modules/matrices/matrices-6.py",
"max_issues_count": 31,
"max_issues_repo_head_hexsha": "ec1b7285b5fc5b3ba2eafa32470066122ae84b52",
"max_issues_repo_issues_event_max_datetime": "2021-11-15T10:58:15.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-27T07:16:19.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "willtryagain/sympy_doc",
"max_issues_repo_path": "dev/modules/matrices/matrices-6.py",
"max_line_length": 51,
"max_stars_count": 20,
"max_stars_repo_head_hexsha": "ec1b7285b5fc5b3ba2eafa32470066122ae84b52",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "willtryagain/sympy_doc",
"max_stars_repo_path": "dev/modules/matrices/matrices-6.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-19T04:03:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-28T01:08:13.000Z",
"num_tokens": 103,
"path": null,
"reason": "from sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 256
} |
\subsection{Endomorphisms as group actions}
We can view each member of the group \(g\) as a homomorphism on \(s\).
Where \(s\) is a vector space \(V\), the representation on each group member is an invertible square matrix.
If the set we use is the vector space \(V\), then we can represent each group element with a square matrix acting on \(V\).
A representation is faithful if, whenever \(a\ne b\) in the group, the representations of \(a\) and \(b\) also differ.
Representation theory: a group's structure is given by relations of the form \(ab=c\); if we can match each element to a matrix such that these relations still hold, we have represented the group.
| {
"alphanum_fraction": 0.7395264117,
"author": null,
"avg_line_length": 39.2142857143,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "4149d1732961f462413c1c158d17a73f45158589",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/geometry/endomorphisms/04-02-action.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/geometry/endomorphisms/04-02-action.tex",
"max_line_length": 139,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/geometry/endomorphisms/04-02-action.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 138,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 549
} |
\documentclass{beamer}\usepackage[]{graphicx}\usepackage[]{color}
%% maxwidth is the original width if it is less than linewidth
%% otherwise use linewidth (to make sure the graphics do not exceed the margin)
\makeatletter
\def\maxwidth{ %
\ifdim\Gin@nat@width>\linewidth
\linewidth
\else
\Gin@nat@width
\fi
}
\makeatother
\definecolor{fgcolor}{rgb}{0.345, 0.345, 0.345}
\newcommand{\hlnum}[1]{\textcolor[rgb]{0.686,0.059,0.569}{#1}}%
\newcommand{\hlstr}[1]{\textcolor[rgb]{0.192,0.494,0.8}{#1}}%
\newcommand{\hlcom}[1]{\textcolor[rgb]{0.678,0.584,0.686}{\textit{#1}}}%
\newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}}%
\newcommand{\hlstd}[1]{\textcolor[rgb]{0.345,0.345,0.345}{#1}}%
\newcommand{\hlkwa}[1]{\textcolor[rgb]{0.161,0.373,0.58}{\textbf{#1}}}%
\newcommand{\hlkwb}[1]{\textcolor[rgb]{0.69,0.353,0.396}{#1}}%
\newcommand{\hlkwc}[1]{\textcolor[rgb]{0.333,0.667,0.333}{#1}}%
\newcommand{\hlkwd}[1]{\textcolor[rgb]{0.737,0.353,0.396}{\textbf{#1}}}%
\let\hlipl\hlkwb
\usepackage{framed}
\makeatletter
\newenvironment{kframe}{%
\def\at@end@of@kframe{}%
\ifinner\ifhmode%
\def\at@end@of@kframe{\end{minipage}}%
\begin{minipage}{\columnwidth}%
\fi\fi%
\def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep
\colorbox{shadecolor}{##1}\hskip-\fboxsep
% There is no \\@totalrightmargin, so:
\hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}%
\MakeFramed {\advance\hsize-\width
\@totalleftmargin\z@ \linewidth\hsize
\@setminipage}}%
{\par\unskip\endMakeFramed%
\at@end@of@kframe}
\makeatother
\definecolor{shadecolor}{rgb}{.97, .97, .97}
\definecolor{messagecolor}{rgb}{0, 0, 0}
\definecolor{warningcolor}{rgb}{1, 0, 1}
\definecolor{errorcolor}{rgb}{1, 0, 0}
\newenvironment{knitrout}{}{} % an empty environment to be redefined in TeX
\usepackage{alltt}
\usepackage{default}
\usepackage{animate} %need the animate.sty file
\usepackage{graphicx}
%\graphicspath{{/home/sahir/Dropbox/jobs/laval/minicours/slides/}}
\usepackage{hyperref, url}
%\usepackage[round,sort]{natbib} % bibliography omit 'round' option if you prefer square brackets
%\bibliographystyle{apalike}
\usepackage{biblatex}
\bibliography{bib.bib}
% Removes icon in bibliography
\setbeamertemplate{bibliography item}[text]
\usepackage[normalem]{ulem}
\setbeamertemplate{theorems}[numbered]
\usepackage[final]{pdfpages}
%\newtheorem{prop}{Proposition}
%\newenvironment{theoremc}[1]
%{\begin{shaded}\begin{theorem}[#1]}
% {\end{theorem}\end{shaded}}
%\newtheorem{examplefirst}{Example}
%\newtheorem{examplesecond}{Example}
%\newenvironment<>{examplefirst}[1][]{%
% \setbeamercolor{block title example}{bg=lightgray}%
% \begin{example}#2[#1]}{\end{example}}
%\newenvironment<>{examplesecond}[1][]{%
% \setbeamercolor{block title example}{fg=white,bg=blue!75!black}%
% \begin{example}#2[#1]}{\end{example}}
%\usepackage{amsthm}
\usepackage[figurename=Fig.]{caption}
\usepackage{subfig}
\usepackage{tikz, pgfplots,epsfig}
\usetikzlibrary{arrows,shapes.geometric}
\usepackage{color, colortbl,xcolor}
\definecolor{lightgray}{RGB}{200,200,200}
\definecolor{palegray}{RGB}{221,221,221}
\definecolor{myblue}{RGB}{0,89,179}
\usepackage{comment}
\setbeamercolor{frametitle}{fg=myblue}
\setbeamercolor{section in head/foot}{bg=myblue, fg=white}
\setbeamercolor{author in head/foot}{bg=myblue}
\setbeamercolor{date in head/foot}{bg=myblue}
\usepackage{shadethm}
%\colorlet{shadecolor}{blue!15}
\colorlet{shadecolor}{palegray}
%\setlength{\shadeboxrule}{.4pt}
\newshadetheorem{thm}{Theorem}
\newshadetheorem{defm}{Definition}
\newshadetheorem{exm}{Exercise}
\newshadetheorem{remarkm}{Remark}
%\definecolor{shadethmcolor}{HTML}{EDF8FF}
\definecolor{shadethmcolor}{RGB}{221,221,221}
%\definecolor{shaderulecolor}{HTML}{45CFFF}
\definecolor{shaderulecolor}{RGB}{0,89,179}
\setlength{\shadeboxrule}{.4pt}
\usepackage{array}
\newcolumntype{L}{>{\centering\arraybackslash}m{3cm}} % used for text wrapping in ctable
\usepackage{ctable}
\usepackage[utf8]{inputenc}
\usepackage{fontenc}
\usepackage{pifont}% http://ctan.org/pkg/pifont
\newcommand{\cmark}{\ding{51}}%
\newcommand{\xmark}{\ding{55}}%
\def\widebar#1{\overline{#1}}
\definecolor{whitesmoke}{rgb}{0.96, 0.96, 0.96}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{bm}
\def\transpose{{\sf{T}}}
\def\E{{\skew0\bm{E}}}
\def\Xvec{{\skew0\bm{X}}}
\def\Xveca{{\skew0\bm{X}}_1}
\def\Xvecb{{\skew0\bm{X}}_2}
\def\Yvec{{\skew0\bm{Y}}}
\def\bmY{{\skew0\bm{Y}}}
\def\bmX{{\skew0\bm{X}}}
\def\bmy{{\skew0\bm{y}}}
\def\bmG{{\skew0\bm{G}}}
\def\bmS{{\skew0\bm{S}}}
\def\bmA{{\skew0\bm{A}}}
\def\bmB{{\skew0\bm{B}}}
\def\bmD{{\skew0\bm{D}}}
\def\bmI{{\skew0\bm{I}}}
\def\bmV{{\skew0\bm{V}}}
\def\bmU{{\skew0\bm{U}}}
\def\bv{{\skew0\bm{v}}}
\def\bw{{\skew0\bm{w}}}
\def\bmm{{\skew0\bm{m}}}
\def\bmzero{{\skew0\bm{0}}}
\def\bx{{\skew0\bm{x}}}
\def\xveca{{\skew0\bm{x}}_1}
\def\xvecb{{\skew0\bm{x}}_2}
\def\N{{\skew0\mathcal{N}}}
\def\T{{\small T}}
\def\mvec{{\skew0\bm{m}}}
\def\bmmu{{\skew0\bm{\mu}}}
\def\muvec{{\skew0\bm{\mu}}}
\def\balpha{{\skew0\bm{\alpha}}}
\def\bbeta{{\skew0\bm{\beta}}}
\def\bmtheta{{\skew0\bm{\theta}}}
\def\btheta{{\skew0\bm{\theta}}}
\def\cvec{{\skew0\mathbf{c}}}
\def\Xbar{\overline{X}}
\definecolor{lightgray}{rgb}{0.91,0.91,0.91}
\definecolor{purpleblue}{rgb}{0.50,0.50,1.00}
\usepackage{fontspec}
%\setsansfont{Fira Sans}
%\setmonofont{Fira Mono}
\setsansfont[ItalicFont={Fira Sans Light Italic},BoldFont={Fira Sans},BoldItalicFont={Fira Sans Italic}]{Fira Sans Light}
\setmonofont[BoldFont={Fira Mono Medium}]{Fira Mono}
\setbeamercolor{itemize item}{fg=myblue}
\setbeamertemplate{itemize item}[square]
\setbeamertemplate{navigation symbols}{\usebeamercolor[fg]{title in head/foot}\usebeamerfont{title in head/foot}\insertframenumber}
\setbeamertemplate{footline}{}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{exercise}[theorem]{Exercise}
\titlegraphic{\hfill\includegraphics[height=1cm]{mcgill_logo.png}}
%% You also use hyperref, and pick colors
\hypersetup{colorlinks,citecolor=orange,filecolor=red,linkcolor=brown,urlcolor=blue}
\newcommand {\framedgraphiccaption}[2] {
\begin{figure}
\centering
\includegraphics[width=\textwidth,height=0.8\textheight,keepaspectratio]{#1}
\caption{#2}
\end{figure}
}
\newcommand {\framedgraphic}[1] {
\begin{figure}
\centering
\includegraphics[width=\textwidth,height=0.9\textheight,keepaspectratio]{#1}
\end{figure}
}
\AtBeginSection[]{
\begin{frame}
\vfill
\centering
\begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
\usebeamerfont{title}\insertsectionhead\par%
\end{beamercolorbox}
\vfill
\end{frame}
}
\newcommand\Wider[2][3em]{%
\makebox[\linewidth][c]{%
\begin{minipage}{\dimexpr\textwidth+#1\relax}
\raggedright#2
\end{minipage}%
}%
}
\newcommand{\blue}[1]{\textcolor{blue}{#1}}
\newcommand{\red}[1]{\textcolor{red}{#1}}
%\makeatother
\usepackage{xparse}
\NewDocumentCommand\mylist{>{\SplitList{;}}m}
{
\begin{itemize}
\ProcessList{#1}{ \insertitem }
\end{itemize}
}
\NewDocumentCommand\mynum{>{\SplitList{;}}m}
{
\begin{enumerate}
\ProcessList{#1}{ \insertitem }
\end{enumerate}
}
\newcommand\insertitem[1]{\item #1}
\newcommand\FrameText[1]{%
\begin{textblock*}{\paperwidth}(0pt,\textheight)
\raggedright #1\hspace{.5em}
\end{textblock*}}
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\begin{document}
%\sffamily
%\title{Introduction to Regression Trees}
%\author{Sahir Bhatnagar \inst{1}}
%\author[shortname]{Sahir Rai Bhatnagar, PhD Candidate (Biostatistics) }
%\institute[shortinst]{Department of Epidemiology, Biostatistics and Occupational Health}
\title{Inference about a Population Rate ($\lambda$)}
\subtitle{\href{https://www.dropbox.com/s/b5q7vqo2ev6k2me/EPIB607intensity-model-inference-plan-2018.pdf?dl=0}{JH notes on rates}}
\author{Sahir Bhatnagar and James Hanley}
\institute{
EPIB 607\\
Department of Epidemiology, Biostatistics, and Occupational Health\\
McGill University\\
\vspace{0.1 in}
\texttt{sahir.bhatnagar@mcgill.ca}\\
\texttt{\url{https://sahirbhatnagar.com/EPIB607/}}}
%\date
\maketitle
\section{Poisson Model for Sampling Variability of a Count in a Given Amount of ``Experience''}
\begin{frame}{Motivating example: Demand for medical care}
\begin{itemize}
\setlength\itemsep{1em}
\item Data from the US National Medical Expenditure Survey (NMES) for 1987/88
\item 4406 individuals, aged 66 and over, who are covered by Medicare, a public insurance program
\item The objective of the study was to model the demand for medical care - as captured by \underline{the number} of physician/non-physician office and hospital outpatient visits - by the covariates available for the patients.
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Motivating example: Demand for medical care}
\vspace*{-0.2in}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-1-1}
}
\end{knitrout}
\end{frame}
\begin{frame}{Some observations about the previous plot}
\begin{itemize}
\setlength\itemsep{1em}
\item Discrete outcome $\to$ 1, 2, 3, ... visits \pause
\item There are rare events, e.g. 1 individual with 89 visits \pause
\item The data are far from normally distributed \pause
\item Can theoretically go on forever
\end{itemize}
\end{frame}
\begin{frame}{The Poisson Distribution}
\begin{itemize}
\setlength\itemsep{1em}
\item The binomial distribution was derived by starting with an experiment consisting of trials or draws and applying the laws of probability to various outcomes of the experiment. \pause
\item There is no simple experiment on which the Poisson distribution is based, although we will shortly describe how it can be obtained by certain limiting operations.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{The Poisson Distribution: what it is, and features}
\begin{itemize}
\small
\setlength\itemsep{1em}
\item The (infinite number of) probabilities $P_{0}, P_{1}, ..., P_{y}, ..., $ of observing
$Y = 0, 1, 2, \dots , y, \dots $ events in a given amount of ``experience.'' \pause
\item These probabilities, $P(Y = k) \to$ \texttt{dpois()}, are governed by a single parameter, the mean $E[Y] = \mu$ which represents the expected \textbf{number} of events in the amount of experience actually studied.\pause
\item We say that a random variable $Y \sim \textrm{Poisson}(\mu)$ distribution if
\[ P(Y=k) = \frac{\mu^k}{k!}e^{-\mu}, \quad k = 0, 1, 2, \ldots\]
\pause
\item Note: in \texttt{dpois()} $\mu$ is referred to as \texttt{lambda}
\item Note the distinction between $\mu$ and $\lambda$
\begin{itemize}
\item $\mu$: expected \textbf{number} of events
\item $\lambda$: \textbf{rate} parameter
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{The probability mass function for $\mu=0.5$}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlkwd{dpois}\hlstd{(}\hlkwc{x} \hlstd{=} \hlnum{0}\hlopt{:}\hlnum{15}\hlstd{,} \hlkwc{lambda} \hlstd{=} \hlnum{0.5}\hlstd{)}
\end{alltt}
\end{knitrout}
\vspace*{-0.5in}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-3-1}
}
\end{knitrout}
\end{frame}
\begin{frame}[fragile]{The probability mass function for $\mu=10$}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlkwd{dpois}\hlstd{(}\hlkwc{x} \hlstd{=} \hlnum{0}\hlopt{:}\hlnum{15}\hlstd{,} \hlkwc{lambda} \hlstd{=} \hlnum{10}\hlstd{)}
\end{alltt}
\end{knitrout}
\vspace*{-0.5in}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-5-1}
}
\end{knitrout}
\end{frame}
\begin{frame}[fragile]{The probability mass function}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-6-1}
}
\end{knitrout}
\end{frame}
\begin{frame}
\frametitle{The Poisson Distribution: what it is, and features}
\begin{itemize}
\setlength\itemsep{2em}
\item $\sigma^2_Y = \mu \ \to \ \ \sigma_Y = \sqrt{\mu}.$ \pause
\item Approximated by $\mathcal{N}(\mu, \sqrt{\mu})$ when $\mu >> 10$ \pause
\item Open-ended (unlike Binomial), but in practice, has finite range.
\item Poisson data sometimes called ``numerator only'': (unlike Binomial) may not ``see'' or count ``non-events''
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Normal approximation to Poisson is the CLT in action}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-7-1}
}
\end{knitrout}
\end{frame}
\begin{frame}
\frametitle{How it arises}
\begin{itemize}
\setlength\itemsep{1em}
\item Count of events or items that \underline{occur randomly}, with \underline{low homogeneous intensity}, in time, space, or `item'-time (e.g. person--time). \pause
\item Binomial($n,\pi$) when $n \rightarrow \infty\textrm{ and } \pi \rightarrow 0,$ but $n \times \pi = \mu$ is finite.\pause
\item $Y\sim Poisson(\mu_Y)$ if time ($T$) between events follows an $T \sim \textrm{Exponential}(\mu_{T} = 1/\mu_{Y}).$
{ \scriptsize \url{http://www.epi.mcgill.ca/hanley/bios601/Intensity-Rate/Randomness_poisson.pdf}} \pause
\item As sum of $\ge 2$ \textit{independent} Poisson random variables,
with same \textbf{or different} $\mu$'s: \newline
$Y_{1} \sim \textrm{Poisson}(\mu_{1}) \: \:
Y_{2} \sim \textrm{Poisson}(\mu_{2}) \Rightarrow Y = Y_{1} + Y_{2} \sim \textrm{Poisson}(\mu_{1}+\mu_{2}).$
\end{itemize}
\end{frame}
\begin{frame}{Poisson distribution as a limit}
The rationale for using the Poisson distribution in many situations is provided by the following proposition.
\vspace*{0.5in}
\begin{proposition}[Limit of a binomial is Poisson]
Suppose that $Y \sim Binomial(n,\pi)$. If we let $\pi = \mu/n$, then as $n \rightarrow \infty$, $Binomial(n,\pi) \rightarrow Poisson(\mu)$. Another way of saying this: for large $n$ and small $\pi$, we can approximate the $Binomial(n,\pi)$ probability by the $Poisson(\mu = n\pi)$.
\end{proposition}
\end{frame}
\begin{frame}{Poisson approximation to the Binomial}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-8-1}
}
\end{knitrout}
\end{frame}
\begin{frame}
\frametitle{Examples}
\begin{itemize}
\setlength\itemsep{0.5em}
\item numbers of asbestos fibres
\item deaths from horse kicks*
\item needle-stick or other percutaneous injuries
\item bus-driver accidents*
\item twin-pairs*
\item radioactive disintegrations*
\item flying-bomb hits*
\item white blood cells
\item typographical errors
\item cell occupants -- in a given volume, area, line-length, population-time, time, etc.
\footnote{\footnotesize * included in \url{http://www.epi.mcgill.ca/hanley/bios601/Intensity-Rate/}}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{}
\begin{figure}[h]
\begin{center}
\includegraphics[width=3.9in,height=2.6in]{DotsinPopulationTime64.pdf}
\caption{Events in Population-Time randomly generated from intensities that are constant within (2 squares high by 2 squares wide) `panels', but vary between such panels. In Epidemiology, each square might represent a number of units of population-time, and each dot an event.}
\end{center}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{}
\begin{figure}[h]
\begin{center}
\includegraphics[width=4in,height=2in]{timeStrips63.pdf}
\caption{Events in Time: 10 examples, randomly generated from constant over time intensities. Simulated with 1000 Bernoulli$(\tiny{\textrm{small }\pi})$'s per time unit.}
\end{center}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Does the Poisson Distribution apply to.. ?}
\begin{enumerate}
\setlength\itemsep{0.9em}
\item Yearly variations in numbers of persons killed in plane crashes
\item Daily variations in numbers of births
\item Weekly variations in numbers of births
\item Daily variations in numbers of deaths
\item Daily variations in numbers of traffic accidents
\item Variations across cookies/pizzas in numbers of chocolate chips/olives
\end{enumerate}
\end{frame}
\section{Inference regarding $\mu$, based on observed count $y$}
\begin{frame}[fragile]{Confidence interval for $\mu$}
\begin{itemize}
\setlength\itemsep{2em}
\item If the CLT hasn't kicked in, then the usual CI might not be appropriate: $$\textrm{point-estimate} \pm z^\star \times \textrm{standard error}$$
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlstd{mosaic}\hlopt{::}\hlkwd{xqpois}\hlstd{(}\hlkwd{c}\hlstd{(}\hlnum{0.025}\hlstd{,} \hlnum{0.975}\hlstd{),} \hlkwc{lambda} \hlstd{=} \hlnum{6}\hlstd{)}
\end{alltt}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-9-1}
}
\begin{verbatim}
## [1] 2 11
\end{verbatim}
\end{knitrout}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{Confidence interval for $\mu$}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlstd{manipulate}\hlopt{::}\hlkwd{manipulate}\hlstd{(}
\hlstd{mosaic}\hlopt{::}\hlkwd{xqpois}\hlstd{(}\hlkwd{c}\hlstd{(}\hlnum{0.025}\hlstd{,} \hlnum{0.975}\hlstd{),} \hlkwc{lambda} \hlstd{= LAMBDA),}
\hlkwc{LAMBDA} \hlstd{= manipulate}\hlopt{::}\hlkwd{slider}\hlstd{(}\hlnum{1}\hlstd{,} \hlnum{200}\hlstd{,} \hlkwc{step} \hlstd{=} \hlnum{1}\hlstd{))}
\end{alltt}
\end{knitrout}
\end{frame}
\begin{frame}[fragile]{Confidence interval for $\mu$}
\begin{itemize}
\setlength\itemsep{2em}
\item Similar to the binomial (Clopper-Pearson CI), we consider a \textit{first-principles} $100(1-\alpha)\%$ CI $[\mu_{LOWER},\: \mu_{UPPER}]$ such that
$$P(Y \ge y \: | \: \mu_{LOWER}) = \alpha/2 \:\: \textrm{ and} \:\: P(Y \le y \: | \: \mu_{UPPER}) = \alpha/2.$$
\item For example, the 95\% CI for $\mu$, based on $y=6,$ is $[\underline{2.20}, \underline{13.06}].$
\end{itemize}
\end{frame}
\begin{frame}
\centering
\includegraphics[width=3in,height=4in]{CI_Poisson(6).pdf}
\end{frame}
\begin{frame}[fragile]{Poisson 95\% CI for $\mu$ when $y = 6$}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlcom{# upper limit --> lower tail needs 2.5%}
\hlstd{manipulate}\hlopt{::}\hlkwd{manipulate}\hlstd{(}
\hlstd{mosaic}\hlopt{::}\hlkwd{xppois}\hlstd{(}\hlnum{6}\hlstd{,} \hlkwc{lambda} \hlstd{= LAMBDA),}
\hlkwc{LAMBDA} \hlstd{= manipulate}\hlopt{::}\hlkwd{slider}\hlstd{(}\hlnum{0.01}\hlstd{,} \hlnum{20}\hlstd{,} \hlkwc{step} \hlstd{=} \hlnum{0.01}\hlstd{))}
\hlcom{# lower limit --> upper tail needs 2.5%}
\hlcom{# when lower.tail=FALSE, ppois doesnt include k, i.e., P(Y > k)}
\hlstd{manipulate}\hlopt{::}\hlkwd{manipulate}\hlstd{(}
\hlstd{mosaic}\hlopt{::}\hlkwd{xppois}\hlstd{(}\hlnum{5}\hlstd{,} \hlkwc{lambda} \hlstd{= LAMBDA,} \hlkwc{lower.tail} \hlstd{=} \hlnum{FALSE}\hlstd{),}
\hlkwc{LAMBDA} \hlstd{= manipulate}\hlopt{::}\hlkwd{slider}\hlstd{(}\hlnum{0.01}\hlstd{,} \hlnum{20}\hlstd{,} \hlkwc{step} \hlstd{=} \hlnum{0.01}\hlstd{))}
\end{alltt}
\end{knitrout}
\end{frame}
\begin{frame}
\frametitle{Confidence interval for $\mu$}
\begin{itemize}
\setlength\itemsep{1em}
\item For a given confidence level, there is one CI for each value of $y$.
\item Each one can be worked out by trial and error, or -- as has been done for the last 80 years -- directly from the (exact) link between \underline{the tail areas} of the Poisson and \textbf{Gamma} distributions.
\item These CI's -- for $y$ up to at least 30 -- were found in special books of statistical tables or in textbooks.
\item As you can check, $z$-based intervals are more than adequate beyond this $y$. \textbf{Today}, if you have access to \texttt{R} (or \texttt{Stata} or \texttt{SAS}) you can obtain the first principles CIs directly \textbf{for \textit{any} value of $y.$}
\end{itemize}
\end{frame}
\begin{frame}{80\%, 90\% and 95\% CI for mean count $\mu$ if we observe \underline{0 to 30 events} in a certain amount of experience}
\tiny
\centering
\begin{tabular}{|r | r r | r r | r r | }
$y$ & \multicolumn{2}{c}{95\%} & \multicolumn{2}{c}{90\%} & \multicolumn{2}{c}{80\%} \\
\hline
0 & 0.00 & 3.69 & 0.00 & 3.00 & 0.00 & 2.30 \\
1 & 0.03 & 5.57 & 0.05 & 4.74 & 0.11 & 3.89 \\
2 & 0.24 & 7.22 & 0.36 & 6.30 & 0.53 & 5.32 \\
3 & 0.62 & 8.77 & 0.82 & 7.75 & 1.10 & 6.68 \\
4 & 1.09 & 10.24 & 1.37 & 9.15 & 1.74 & 7.99 \\
& & & & & & \\
5 & 1.62 & 11.67 & 1.97 & 10.51 & 2.43 & 9.27 \\
\underline{6} & \underline{2.20} & \underline{13.06} & 2.61 & 11.84 & 3.15 & 10.53 \\
7 & 2.81 & 14.42 & 3.29 & 13.15 & 3.89 & 11.77 \\
8 & 3.45 & 15.76 & 3.98 & 14.43 & 4.66 & 12.99 \\
9 & 4.12 & 17.08 & 4.70 & 15.71 & 5.43 & 14.21 \\
& & & & & & \\
10 & 4.80 & 18.39 & 5.43 & 16.96 & 6.22 & 15.41 \\
11 & 5.49 & 19.68 & 6.17 & 18.21 & 7.02 & 16.60 \\
12 & 6.20 & 20.96 & 6.92 & 19.44 & 7.83 & 17.78 \\
13 & 6.92 & 22.23 & 7.69 & 20.67 & 8.65 & 18.96 \\
14 & 7.65 & 23.49 & 8.46 & 21.89 & 9.47 & 20.13 \\
& & & & & & \\
15 & 8.40 & 24.74 & 9.25 & 23.10 & 10.30 & 21.29 \\
16 & 9.15 & 25.98 & 10.04 & 24.30 & 11.14 & 22.45 \\
17 & 9.90 & 27.22 & 10.83 & 25.50 & 11.98 & 23.61 \\
18 & 10.67 & 28.45 & 11.63 & 26.69 & 12.82 & 24.76 \\
19 & 11.44 & 29.67 & 12.44 & 27.88 & 13.67 & 25.90 \\
& & & & & & \\
20 & 12.22 & 30.89 & 13.25 & 29.06 & 14.53 & 27.05 \\
21 & 13.00 & 32.10 & 14.07 & 30.24 & 15.38 & 28.18 \\
22 & 13.79 & 33.31 & 14.89 & 31.41 & 16.24 & 29.32 \\
23 & 14.58 & 34.51 & 15.72 & 32.59 & 17.11 & 30.45 \\
24 & 15.38 & 35.71 & 16.55 & 33.75 & 17.97 & 31.58 \\
\hline
\end{tabular}
\end{frame}
\begin{frame}[fragile]{95\% CI for mean count $\mu$ with \texttt{q} function}
\begin{itemize}
\setlength\itemsep{1em}
\item To obtain these in \texttt{R} we use the natural link between the Poisson and the \textit{gamma}
distributions.\footnote{
{ \tiny \href{http://www.epi.mcgill.ca/hanley/bios601/Mean-Quantile/forAccromathBackTranslate.pdf}{details found here} }}
\item In \texttt{R}, e.g., the 95\% limits for $\mu$ based on $y=6$ are obtained as
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlkwd{qgamma}\hlstd{(}\hlkwc{p} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{0.025}\hlstd{,}\hlnum{0.975}\hlstd{),} \hlkwc{shape} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{6}\hlstd{,} \hlnum{7}\hlstd{))}
\end{alltt}
\begin{verbatim}
## [1] 2.201894 13.059474
\end{verbatim}
\end{knitrout}
\item More generically, for \textit{any} $y$, as
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlkwd{qgamma}\hlstd{(}\hlkwc{p} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{0.025}\hlstd{,}\hlnum{0.975}\hlstd{),} \hlkwc{shape} \hlstd{=} \hlkwd{c}\hlstd{(y, y}\hlopt{+}\hlnum{1}\hlstd{))}
\end{alltt}
\end{knitrout}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{95\% CI for mean count $\mu$ with canned function}
\begin{itemize}
\setlength\itemsep{1em}
\item These limits can \underline{also} be found using the canned function in \texttt{R}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlstd{stats}\hlopt{::}\hlkwd{poisson.test}\hlstd{(}\hlnum{6}\hlstd{)}
\end{alltt}
\begin{verbatim}
##
## Exact Poisson test
##
## data: 6 time base: 1
## number of events = 6, time base = 1, p-value = 0.0005942
## alternative hypothesis: true event rate is not equal to 1
## 95 percent confidence interval:
## 2.201894 13.059474
## sample estimates:
## event rate
## 6
\end{verbatim}
\end{knitrout}
\end{itemize}
\end{frame}
\begin{frame}{$z$-based confidence intervals}
\scriptsize
\underline{once $\mu$ is in the upper teens}, the Poisson $\to$ the Normal
\centering
\includegraphics[scale=0.5]{Shapes.pdf}
\end{frame}
\begin{frame}{$z$-based confidence intervals}
\begin{itemize}
\setlength\itemsep{1.1em}
\item Thus, a plus/minus CI based on SE = $\hat{\sigma} = \sqrt{\hat{\mu}} = \sqrt{y},$ is simply
$$[ \mu_{L}, \ \mu_{U}] = y \ \pm \ z^\star \times \sqrt{y}. \ \ \ \ \ \ \ \ \ \ \ \ $$
\item Equivalently we can use the \texttt{q} function: $$qnorm(p = c(0.025, 0.975), mean = y, sd = \sqrt{y})$$
\pause
\vspace*{-0.7cm}
\item From a single realization $y$ of a $N(\mu,\sigma_{Y})$ random variable, we can't estimate \textbf{both} $\mu$ and $\sigma_{Y}$: for a SE, we would have to use \textit{outside} information on $\sigma_{Y}$.
\pause
\item In the Poisson$(\mu)$ distribution, $\sigma_{Y} = \sqrt{\mu},$ so we calculate a ``\underline{model-based}'' SE.
%\pause
%\item \textbf{How large a $y$?}: When $\mu > 5,$ the distribution isn't `crowded' into the corner: the lower tail of the Gaussian approximation doesn't spill over the 0 boundary.
\end{itemize}
\end{frame}
\begin{frame}
\Wider[8em]{
\centering
\includegraphics[width=4.9in,height=3.6in]{PoissonNomogram.pdf}
}
\end{frame}
\section{Inference regarding an event rate parameter $\lambda$, based on observed number of events $y$ in a known amount of population-time (PT)}
\begin{frame}{Rates are better for comparisons}
\begin{table}
\centering
\begin{tabular}{cc}
year & deaths ($y$) \\
\hline
1971 & 33 \\
2002 & 211 \\
\hline
\end{tabular}
\caption{Deaths from lung cancer in the age-group 55-60 in Quebec in 1971 and 2002}
\end{table}
\pause
\blue{A researcher asks:} Is the situation getting worse over time for lung cancer in this age group?
\pause
\vspace*{0.5in}
\red{Your reply:} \textbf{What's the denominator??}
\end{frame}
\begin{frame}
\framedgraphic{patrick.png}
\end{frame}
\begin{frame}{Rates are better for comparisons}
\small
\begin{itemize}
\setlength\itemsep{1em}
\item So far, we have focused on inference regarding $\mu$, the expected \textbf{number} of events in the amount of experience actually studied.
\item However, for \underline{comparison} purposes, the frequency is more often expressed as a \textbf{rate}, \textbf{intensity} or \textbf{incidence density (ID)}. \pause
\end{itemize}
\begin{table}
\centering
\begin{tabular}{cccc}
year & deaths ($y$) & person-time (PT) & rate ($\hat{\lambda}$) \\
\hline
1971 & 33 & 131,200 years & 25 per 100,000 women-years\\
2002 & 211 & 232,978 years & 91 per 100,000 women-years \\
\hline
\end{tabular}
\caption{Deaths from lung cancer in the age-group 55-60 in Quebec in 1971 and 2002}
\end{table}
\end{frame}
\begin{frame}{Rates are better for comparisons}
\begin{itemize}
\setlength\itemsep{1.5em}
\item The \textit{statistic}, the empirical rate or empirical incidence density, is
$$rate =\hat{ID} = \hat{\lambda} = y/\textrm{PT}.$$
\item where $y$ is the observed number of events and PT is the amount of Population-Time in which these events were observed.
\item We think of $\hat{ID}$ or $ \hat{\lambda}$ as a point estimate of the (theoretical) Incidence Density \textit{parameter}, ID or $\lambda$.
\end{itemize}
\end{frame}
\begin{frame}{CI for the rate parameter $\lambda$}
\begin{itemize}
\item To calculate a CI for the ID parameter, we \textbf{treat the PT \underline{denominator} as a constant}, and the \textbf{\underline{numerator}, $y$, as a Poisson random variable}, with expectation $E[y] = \mu = \lambda \times PT$, so that
\begin{align*}
\lambda &= \mu \div \textrm{PT}\\
\hat{\lambda} &= \hat{\mu} \div \textrm{PT} \\
& = y\div\textrm{ PT}
\end{align*}
\vspace*{0.3in}
\begin{equation}
\boxed{\textrm{CI for }\lambda = \{\textrm{CI for }\mu\} \div \textrm{PT}.}
\end{equation}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{CI for the rate parameter $\lambda$}
\begin{itemize}
\setlength\itemsep{1.5em}
\small
\item $y=211$ deaths from lung cancer in 2002 leads to a 95\% CI for $\mu$:
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlkwd{qgamma}\hlstd{(}\hlkwc{p} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{0.025}\hlstd{,} \hlnum{0.975}\hlstd{),} \hlkwc{shape} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{211}\hlstd{,} \hlnum{212}\hlstd{))}
\end{alltt}
\begin{verbatim}
## [1] 183.4885 241.4725
\end{verbatim}
\end{knitrout}
\pause
\item From this we can calculate the 95\% CI \textbf{per 100,000 WY} for $\lambda$ using a PT=232978 years:
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlkwd{qgamma}\hlstd{(}\hlkwc{p} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{0.025}\hlstd{,} \hlnum{0.975}\hlstd{),} \hlkwc{shape} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{211}\hlstd{,} \hlnum{212}\hlstd{))} \hlopt{/} \hlnum{232978} \hlopt{*} \hlnum{1e5}
\end{alltt}
\begin{verbatim}
## [1] 78.75788 103.64607
\end{verbatim}
\end{knitrout}
\pause
\item $y=33$ deaths from lung cancer in 131200 women-years in 1971 leads to a 95\% CI per 100,000 WY for $\lambda$ of
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlkwd{qgamma}\hlstd{(}\hlkwd{c}\hlstd{(}\hlnum{0.025}\hlstd{,}\hlnum{0.975}\hlstd{),} \hlkwd{c}\hlstd{(}\hlnum{33}\hlstd{,}\hlnum{34}\hlstd{))} \hlopt{/} \hlnum{131200} \hlopt{*} \hlnum{1e5}
\end{alltt}
\begin{verbatim}
## [1] 17.31378 35.32338
\end{verbatim}
\end{knitrout}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{CI for the rate parameter $\lambda$ using canned function}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlstd{stats}\hlopt{::}\hlkwd{poisson.test}\hlstd{(}\hlkwc{x} \hlstd{=} \hlnum{33}\hlstd{,} \hlkwc{T} \hlstd{=} \hlnum{131200}\hlstd{)}
\end{alltt}
\begin{verbatim}
##
## Exact Poisson test
##
## data: 33 time base: 131200
## number of events = 33, time base = 131200, p-value < 2.2e-16
## alternative hypothesis: true event rate is not equal to 1
## 95 percent confidence interval:
## 0.0001731378 0.0003532338
## sample estimates:
## event rate
## 0.0002515244
\end{verbatim}
\end{knitrout}
\end{frame}
\section{Test of $H_{0}: \mu = \mu_{0}$ $\quad \Leftrightarrow \quad$ $\lambda = \lambda_{0}$}
\begin{frame}{Statistical evidence and the $p$-value}
\textbf{Recall:}
\vspace*{1cm}
\begin{itemize}
\setlength\itemsep{1.2em}
\item P-Value = Prob[$y$ or more extreme $ |\:H_{0}$]
\item With `more extreme' determined by whether $H_{alt}$ is 1-sided or 2-sided.
\item For a \textbf{formal test}, at level $\alpha$, compare this P-value with $\alpha$.
\end{itemize}
\end{frame}
\begin{frame}{Example: Cancers surrounding nuclear stations}
\small
\begin{itemize}
\setlength\itemsep{.3em}
\item Cancers in area surrounding the Douglas Point nuclear station \pause
\item Denote by $\{CY_{1},CY_{2}, \dots \}$ the numbers of Douglas Point \underline{c}hild-\underline{y}ears of experience in the various age categories that were pooled over.
\item Denote by $\{\lambda^{Ont}_{1}, \lambda^{Ont}_{2}, \dots \}$ the age-specific leukemia incidence rates during the period studied. \pause
\item If the underlying incidence rates in Douglas Point were the same as those in the rest of Ontario, the \textbf{\textit{E}}xpected total number of cases of leukemia for Douglas Point would be
$$E = \mu_{0} = \sum_{ages} CY_{i} \times \lambda^{Ont}_{i} = 0.57.$$
\pause
The actual total number of cases of leukemia \textbf{\textit{O}}bserved in Douglas Point was
$$O = y = \sum_{ages} O_{i} = 2.$$
\pause
Age \textit{Standardized Incidence Ratio (SIR)} = $O/E = 2/0.57 = 3.5.$
\end{itemize}
\end{frame}
\begin{frame}{Q: Is the $O=2$ significantly higher than $E=0.57$}
\blue{Question:}
\begin{itemize}
\setlength\itemsep{1.2em}
\item Is the $y = 2$ cases of leukemia observed in the Douglas Point experience statistically significantly \underline{higher} than the $E=0.57$ cases ``expected'' for this many child-years of observation if in fact the rates in Douglas Point and the rest of Ontario were the same?
\item Or, is the $y=2$ observed in this community compatible with $H_{0}: y \sim \textrm{Poisson}(\mu = 0.57)$?
\end{itemize}
\end{frame}
\begin{frame}[fragile]{A: Is the $O=2$ significantly higher than $E=0.57$}
\small
\begin{itemize}
\setlength\itemsep{1.2em}
\item \red{Answer:} Under $H_{0}$, the age-specific numbers of leukemias $\{y_{1}=O_{1},\: y_{2}=O_{2},\: \dots \}$ in Douglas Point can be regarded as independent Poisson random variables, so their sum $y$ can be regarded as a single Poisson random variable with $\mu=0.57$.
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlstd{mosaic}\hlopt{::}\hlkwd{xppois}\hlstd{(}\hlnum{1}\hlstd{,} \hlkwc{lambda} \hlstd{=} \hlnum{0.57}\hlstd{,} \hlkwc{lower.tail} \hlstd{=} \hlnum{FALSE}\hlstd{)}
\end{alltt}
{\centering \includegraphics[width=1\linewidth]{figure/unnamed-chunk-19-1}
}
\begin{verbatim}
## [1] 0.1121251
\end{verbatim}
\end{knitrout}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{95\% CI for the SIR by hand}
\small
\begin{itemize}
\setlength\itemsep{1.2em}
\item To get the \underline{CI for the SIR}, divide the CI for Douglas Point $\mu_{DP}$ by the null $\mu_0 = 0.57$ (Ontario scaled down to the same size and age structure as Douglas Point). We treat it as a constant because the Ontario rates used in the scaling are measured with much less sampling variability than the Douglas Point ones.
\pause
\item The $y$ = 2 cases translates to
\begin{itemize}
\item 95\% CI for $\mu_{DP}$ $\to$ [0.24, 7.22]
\item 95\% CI for the SIR $\to$ [0.24/0.57, 7.22/0.57]=[0.4, 12.7].
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}[fragile]{95\% CI for the SIR using canned function}
\small
\begin{itemize}
\setlength\itemsep{1.2em}
\item We can \textit{trick} \texttt{stats::poisson.test}
to get the same CI by putting time as 0.57:
\end{itemize}
\begin{knitrout}\scriptsize
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}
\begin{alltt}
\hlstd{stats}\hlopt{::}\hlkwd{poisson.test}\hlstd{(}\hlkwc{x}\hlstd{=}\hlnum{2}\hlstd{,}\hlkwc{T}\hlstd{=}\hlnum{0.57}\hlstd{)}
\end{alltt}
\begin{verbatim}
##
## Exact Poisson test
##
## data: 2 time base: 0.57
## number of events = 2, time base = 0.57, p-value = 0.1121
## alternative hypothesis: true event rate is not equal to 1
## 95 percent confidence interval:
## 0.4249286 12.6748906
## sample estimates:
## event rate
## 3.508772
\end{verbatim}
\end{knitrout}
\end{frame}
\section{Examples of Poisson and not-so Poisson variation}
\end{document}
{
\setbeamercolor{background canvas}{bg=}
\includepdf[pages=-]{PoissonAndExtraPoissonVariation.pdf}
}
\begin{frame}{Example: Cancers surrounding nuclear stations}
\begin{itemize}
\setlength\itemsep{1.2em}
\item
\end{itemize}
\end{frame}
\begin{frame}{Poisson vs. Not Poisson}
\begin{itemize}
\setlength\itemsep{1.2em}
\item Horse kicks are Poisson
\item Full moons within one day (for a contrast, compare full-moon days vs. other days)
\item Congenital anomaly reports are
\item Daily numbers of Sudden Infant Deaths are not Poisson; they follow a seasonal pattern
\end{itemize}
\end{frame}
| {
"alphanum_fraction": 0.695413984,
"author": null,
"avg_line_length": 29.3631840796,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "e6804c1976091fec2b1baa68240f58c92afad921",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-11-25T21:19:06.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-25T21:19:06.000Z",
"max_forks_repo_head_hexsha": "ac2f917bc064f8028a875766af847114cd306396",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "ly129/EPIB607",
"max_forks_repo_path": "slides/one_sample_rate/EPIB607_one_sample_rate.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ac2f917bc064f8028a875766af847114cd306396",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "ly129/EPIB607",
"max_issues_repo_path": "slides/one_sample_rate/EPIB607_one_sample_rate.tex",
"max_line_length": 339,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ac2f917bc064f8028a875766af847114cd306396",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "ly129/EPIB607",
"max_stars_repo_path": "slides/one_sample_rate/EPIB607_one_sample_rate.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 13241,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 35412
} |
from random import uniform, choice
from tqdm import tqdm
from functools import partial
from p_tqdm import p_map
from .backtest import backtest
from .utils.getters import get_strategy
from numpy import isnan, nan
import warnings
warnings.simplefilter(action='ignore', category=Warning)
def populate(size, config_range):
    """Create `size` random individuals for the genetic algorithm.

    Each individual is ``{'fitness': None, 'config': {...}}`` with every
    config value drawn uniformly from its ``(low, high)`` bounds; ranges
    whose lower bound is not a float are truncated to int after drawing.
    """
    def draw(bounds):
        # Float ranges keep the raw uniform draw; integer ranges are floored.
        low, high = bounds[0], bounds[1]
        value = uniform(low, high)
        return value if isinstance(low, float) else int(value)

    individuals = []
    for _ in range(size):
        cfg = {key: draw(bounds) for key, bounds in config_range.items()}
        individuals.append({'fitness': None, 'config': cfg})
    return individuals
def get(r, d):
    """Return ``[lowest low, highest high]`` over the row span of `d` whose
    label bounds are the last two fields of `r` (an itertuples row whose
    trailing columns are the window start/stop positions).
    """
    start, stop = r[-2], r[-1]
    window = d.loc[start:stop, ]
    low_val = min(window['low'])
    high_val = max(window['high'])
    low = window.loc[window['low'] == low_val, 'low'].iat[0]
    high = window.loc[window['high'] == high_val, 'high'].iat[0]
    return [low, high]
def _truncate(d):
    """Reduce a strategy frame to only the rows carrying a buy or sell signal,
    annotating each kept row with the extreme low/high observed between that
    signal and the next one.

    NOTE(review): the assignments below write into a boolean-filtered slice of
    `d` and may emit pandas SettingWithCopyWarning (warnings are silenced at
    module import time via warnings.simplefilter).
    """
    # Preserve the original index as a column so it can be restored at the end.
    d['time'] = d.index
    d = d.reset_index()
    # Keep only rows where a buy or a sell signal is present (non-NaN).
    t = d[((~isnan(d['buy']))|(~isnan(d['sell'])))]
    # ix = positional index of this signal row; iy = position of the next one.
    t['ix'] = t.index
    t['iy'] = t['ix'].shift(-1)
    # The last signal row has no successor; close its window at the end of d.
    if len(t) > 0: t.at[t.index[-1], 'iy'] = d.index[-1]
    # shift(-1) produced floats (NaN padding); restore integer positions.
    t['iy'] = t['iy'].apply(lambda i: int(i))
    # For every signal row, record the extreme low/high over its [ix, iy] window.
    t[['low','high']] = [get(r, d) for r in t.itertuples()]
    return t.set_index('time').drop(['ix','iy'], axis=1)
def fit(p, data, strategy, truncate):
    """Score one individual in place: run the strategy over `data` with the
    individual's config, optionally truncate the frame to signal rows, then
    backtest it. Returns the same individual with 'fitness' filled in.
    """
    frame = strategy(data.copy(deep=False), p['config'])
    if truncate:
        frame = _truncate(frame)
    p['fitness'] = backtest(frame, p['config'])
    return p
def select(data, pop, i, n_iter, strategy, truncate):
    """Fitness-score the whole population in parallel and return the top third.

    Args:
        data: market data frame shared by every evaluation.
        pop: list of individuals ({'fitness': ..., 'config': ...}).
        i, n_iter: current generation / total generations (accepted for
            interface compatibility; unused here).
        strategy: strategy function forwarded to fit().
        truncate: whether fit() should truncate frames before backtesting.
    """
    # BUG FIX: fit() has no parameter named `strategy_name`; the keyword must
    # be `strategy`, otherwise every call raised a TypeError.
    pop = list(p_map(partial(fit, data=data, strategy=strategy, truncate=truncate), pop, desc='  fitting', leave=False))
    pop = sorted(pop, reverse=True, key=lambda p: p['fitness'])
    return pop[:int(len(pop)/3)]
def crossover(selected):
    """Breed a new generation the same size as `selected`: each child takes
    every config key independently from a randomly chosen parent.
    """
    keys = selected[0]['config'].keys()
    children = []
    for _ in range(len(selected)):
        cfg = {key: choice(selected)['config'][key] for key in keys}
        children.append({'fitness': None, 'config': cfg})
    return children
def mutate(subpop, config_range, max_mut_diff):
    """Return fresh individuals whose config values are randomly perturbed
    copies of each individual in `subpop`.

    Each value v is redrawn as ``v * U(1 - max_mut_diff, 1 + max_mut_diff)``
    until the result lies inside ``config_range[k]``; integer-ranged
    parameters are truncated to int before the range check.

    NOTE(review): if v == 0 (or the range excludes every reachable value)
    this loops forever -- the original code had the same hazard.
    """
    def mut_val(k, v):
        low, high = config_range[k][0], config_range[k][1]
        # BUG FIX: the original seeded new_val = -1 before its while-loop, so
        # whenever -1 happened to lie inside [low, high] the loop body never
        # ran and -1 was returned unmutated. Always draw at least once.
        while True:
            new_val = v * uniform(1 - max_mut_diff, 1 + max_mut_diff)
            if not isinstance(low, float):
                new_val = int(new_val)
            if low <= new_val <= high:
                return new_val
    return [{'fitness': None, 'config': {k: mut_val(k, v) for k, v in p['config'].items()}} for p in subpop]
def optimize(data, strategy, config_range, pop_size=1000, n_iter=100, max_mut_diff=.2, max_reps=-1, truncate: bool = False):
    """Tune a strategy's config with a simple genetic algorithm.

    Args:
        data: market data frame handed to the strategy on every evaluation.
        strategy: strategy function applied to (data, config).
        config_range: dict of param -> (low, high) bounds; must also contain
            a 'min_ticks' entry listing the config keys whose maximum becomes
            the resulting 'min_ticks' value.
        pop_size: number of individuals per generation.
        n_iter: number of generations.
        max_mut_diff: maximum relative mutation applied per value.
        max_reps: stop early after this many consecutive generations with an
            unchanged best individual (disabled when <= 0).
        truncate: pass strategy frames through _truncate before backtesting.

    Returns:
        The best config found, augmented with 'min_ticks'.
    """
    # 'min_ticks' is metadata, not an evolvable parameter -- remove it first.
    min_ticks_options = config_range.pop('min_ticks')
    ticks = len(data)  # NOTE(review): unused; retained from the original code.
    reps, best = 0, None
    pop = populate(pop_size, config_range)
    for i in tqdm(range(n_iter), desc='Generation', leave=False):
        selected = select(data, pop, i, n_iter, strategy, truncate)
        if max_reps > 0:
            # Early stopping: count how long the best individual stays unchanged.
            if selected[0] == best:
                reps += 1
            else:
                best = selected[0]
                reps = 0
            if reps > max_reps:
                return selected[0]['config']
        crossed = crossover(selected)
        mutated = mutate(selected, config_range, max_mut_diff)
        # Next generation: 5 elites + mutants + children + random refill.
        fill_count = pop_size - 5 - len(mutated) - len(crossed)
        pop = [*selected[:5], *mutated, *crossed, *populate(fill_count, config_range)]
    result = pop[0]['config']
    # Derive the required warm-up length from the slowest indicator setting.
    # (BUG FIX: the original final line was fused with unrelated pasted text
    # -- `return result | {` -- leaving the file syntactically broken.)
    result['min_ticks'] = max([result[o] for o in min_ticks_options])
    return result
"alphanum_fraction": 0.6136434109,
"author": null,
"avg_line_length": 41.3461538462,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9e72b235ebc512fc3afe2e20a0b66afb744a59cc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "34c67d7a19a9d51a7478050ada6286d97ffa9910",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "andymitch/yuzu",
"max_forks_repo_path": "yuzu/optimize.py",
"max_issues_count": 10,
"max_issues_repo_head_hexsha": "34c67d7a19a9d51a7478050ada6286d97ffa9910",
"max_issues_repo_issues_event_max_datetime": "2021-07-07T03:45:20.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-01T04:13:15.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "andymitch/yuzu",
"max_issues_repo_path": "yuzu/optimize.py",
"max_line_length": 144,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "34c67d7a19a9d51a7478050ada6286d97ffa9910",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "andymitch/yuzu",
"max_stars_repo_path": "yuzu/optimize.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 914,
"path": null,
"reason": "from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3225
} |
import numpy as np
class Board:
    """An n-by-n tic-tac-toe style board.

    Empty squares hold `empty_slots_mark`; players place their integer marks
    (by convention 0 and 1) via `play_turn`. Win detection sums rows, columns
    and diagonals and divides by n, so a line of identical marks m yields m.
    NOTE(review): that scheme is sound for the default marks (0, 1) and
    empty mark -10 on a 3x3 board; confirm before using other encodings.
    """

    def __init__(self, n=3, empty_slots_mark=-10):
        # Fill the whole grid with the empty marker.
        self.board = np.full((n, n), empty_slots_mark).astype(int)
        self.n = n
        self.empty_slots_mark = empty_slots_mark

    def play_turn(self, chosen_location, mark):
        """Place `mark` at `chosen_location`, a (row, col) tuple."""
        self.board[chosen_location] = mark

    def empty_slots(self):
        """Return a list of (row, col) tuples for every empty square."""
        # Flat positions of empties; convert to (row, col) via div/mod by n.
        helper = ((np.where(self.board.flatten() == self.empty_slots_mark)[0] / self.n).astype(int),
                  np.where(self.board.flatten() == self.empty_slots_mark)[0].astype(int))
        return list(zip(helper[0], (helper[1] - self.n * helper[0])))

    def game_tie(self):
        """Return 1 when the game looks finished with no winner declared.

        NOTE(review): relies on the mark encoding -- with empties at -10 the
        board sum only turns positive once the grid fills with 0/1 marks;
        confirm for non-default marks.
        """
        if self.board.sum() > 0:
            return 1

    def _horizontal_win(self, player_marks=(0, 1)):
        # A row of n identical marks m sums to n*m, so sum/n recovers m.
        helper = self.board.sum(1) / self.n
        evaluate = [x for x in helper if x in player_marks]
        if len(evaluate) > 0:
            return evaluate[0]

    def _vertical_win(self, player_marks=(0, 1)):
        # Transpose turns columns into rows, then reuse the row logic.
        helper = self.board.T.sum(1) / self.n
        evaluate = [x for x in helper if x in player_marks]
        if len(evaluate) > 0:
            return evaluate[0]

    def _diagonal_win(self, player_marks=(0, 1)):
        evaluate = self.board.diagonal().sum() / self.n
        if evaluate in player_marks:
            return evaluate

    def _flip_diagonal_win(self, player_marks=(0, 1)):
        # Anti-diagonal: flip left-right first, then take the main diagonal.
        evaluate = np.fliplr(self.board).diagonal().sum() / self.n
        if evaluate in player_marks:
            return evaluate

    def game_won(self, player_marks=(0, 1)):
        """Return the winning player's mark if any line is complete, else None."""
        # BUG FIX: the four helpers were previously called with `self.board`
        # as their `player_marks` argument, so membership was tested against
        # the whole board array instead of the player marks. Pass the marks.
        helper = [self._horizontal_win(player_marks), self._vertical_win(player_marks),
                  self._diagonal_win(player_marks), self._flip_diagonal_win(player_marks)]
        evaluate = [x for x in helper if x in player_marks]
        if len(evaluate) > 0:
            return evaluate[0]

    def game_end(self):
        """Return the game status.

        Returns: 0 or 1 when that player has won, -1 on a tie, -10 while the
        game is still in progress.
        """
        # Compute the winner once instead of re-scanning the board per branch.
        winner = self.game_won()
        if winner == 0:
            return 0
        elif winner == 1:
            return 1
        elif self.game_tie() == 1:
            return -1
        else:
            return -10
| {
"alphanum_fraction": 0.5751578947,
"author": null,
"avg_line_length": 30.8441558442,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "33b840c32c215a26b9ce3bbba15c9b9ee150bab4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a23a923f7d8e294eecb9ce75e474aa787c97d836",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "alkashef/pic_pac_poe",
"max_forks_repo_path": "BuildingBlocks/Engine/Board.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a23a923f7d8e294eecb9ce75e474aa787c97d836",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "alkashef/pic_pac_poe",
"max_issues_repo_path": "BuildingBlocks/Engine/Board.py",
"max_line_length": 109,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a23a923f7d8e294eecb9ce75e474aa787c97d836",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "alkashef/pic_pac_poe",
"max_stars_repo_path": "BuildingBlocks/Engine/Board.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 586,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2375
} |
import sys
import numpy as np
import numpy.random as npr
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
from scipy.special import gammaln
import matplotlib.pyplot as plt
def log_factorial(n):
    """Natural log of n! computed via the log-gamma identity ln(n!) = lngamma(n+1).

    Accepts scalars or numpy arrays (gammaln is vectorized).
    """
    return gammaln(n + 1)
def log_multinomial(xs, ps):
    """Log-pmf of multinomial counts `xs` under category probabilities `ps`.

    A tiny epsilon is added inside the log to avoid -inf for zero
    probabilities.
    """
    total = np.sum(xs)
    # log of the multinomial coefficient: n! / (x1! x2! ...)
    coefficient = log_factorial(total) - np.sum(log_factorial(xs))
    log_likelihood = np.sum(xs * np.log(ps + 0.0000000000001))
    return coefficient + log_likelihood
def locus_prob(locus_obs_counts, locus_freq):
    """Summed log multinomial likelihood at one locus over all ordered
    population pairs: every population's observed counts are evaluated under
    every population's frequencies.

    Args:
        locus_obs_counts: per-population haplotype counts at this locus
            (first axis indexes populations).
        locus_freq: per-population haplotype frequencies, same layout.
    """
    log_prob = 0.0
    n_pop = locus_obs_counts.shape[0]
    # BUG FIX: xrange is Python 2 only (NameError under Python 3); range
    # behaves identically here on both versions.
    for p1 in range(n_pop):
        for p2 in range(n_pop):
            log_prob += log_multinomial(locus_obs_counts[p1], locus_freq[p2])
    return log_prob
def probability(observed_counts):
    """Per-locus log-probabilities of the observed haplotype counts.

    Frequencies are obtained by normalizing the counts, then each locus is
    scored with locus_prob. Assumes observed_counts is indexed as
    [locus, population, haplotype] -- TODO confirm against read_counts.
    """
    observed_frequencies = normalize_haplotypes(observed_counts)
    n_loci = observed_counts.shape[0]
    locus_probabilities = np.zeros(n_loci)
    # BUG FIX: xrange is Python 2 only (NameError under Python 3); range
    # behaves identically here on both versions.
    for locus in range(n_loci):
        prob = locus_prob(observed_counts[locus, :, :], observed_frequencies[locus, :, :])
        locus_probabilities[locus] = prob
    return locus_probabilities
def main(occur_fl, output_fl):
    """Score all loci and write the lowest-probability 5% to `output_fl`.

    Each output line is "<locus index> <log probability>", most improbable
    loci first.

    Args:
        occur_fl: path to the haplotype occurrence/count file.
        output_fl: path the selected loci are written to.
    """
    observed_counts = read_counts(occur_fl)
    # BUG FIX: the Python 2 print statement is a SyntaxError under Python 3.
    print(observed_counts.shape)
    locus_log_probs = probability(observed_counts)
    # Pair each log-prob with its locus index so sorting keeps the identity.
    sortable = [(locus_log_probs[i], i) for i in range(len(locus_log_probs))]
    sortable.sort()
    threshold_idx = int(0.05 * len(sortable))
    # BUG FIX: use a context manager so the file is closed even on error
    # (the original left the handle open if a write raised).
    with open(output_fl, "w") as fl:
        for log_prob, i in sortable[:threshold_idx]:
            fl.write("%s %s\n" % (i, log_prob))
if __name__ == "__main__":
occur_fl = sys.argv[1]
output_fl = sys.argv[2]
main(occur_fl, output_fl)
| {
"alphanum_fraction": 0.7529335072,
"author": null,
"avg_line_length": 27.8909090909,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ed7d921d6c8702d7d807454647cea1560ad68a6f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "308079909c49a478787c103f2d2b8ee037ac6952",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "rnowling/pop-gen-models",
"max_forks_repo_path": "cross-multinomial/cross_multinomial_predict.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "308079909c49a478787c103f2d2b8ee037ac6952",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "rnowling/pop-gen-models",
"max_issues_repo_path": "cross-multinomial/cross_multinomial_predict.py",
"max_line_length": 100,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "308079909c49a478787c103f2d2b8ee037ac6952",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "rnowling/pop-gen-models",
"max_stars_repo_path": "cross-multinomial/cross_multinomial_predict.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 434,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1534
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.