| repo_name (string, 5 to 92 chars) | path (string, 4 to 221 chars) | copies (string, 19 classes) | size (string, 4 to 6 chars) | content (string, 766 to 896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 32 to 997) | alpha_frac (float64, 0.25 to 0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5 to 13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Brazelton-Lab/bio_utils
|
bio_utils/iterators/fasta.py
|
1
|
5590
|
#! /usr/bin/env python3
"""Faster, simpler, Screed-esque iterator for FASTA files
Copyright:
fasta.py iterate over and return entries of a FASTA file
Copyright (C) 2015 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
__author__ = 'Alex Hyer'
__email__ = 'theonehyer@gmail.com'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '3.0.1'
class FastaEntry:
"""A simple class to store data from FASTA entries and write them
Attributes:
id (str): FASTA ID (everything between the '>' and the first space
of header line)
description (str): FASTA description (everything after the first
space of the header line)
sequence (str): FASTA sequence
"""
def __init__(self):
"""Initialize attributes to store FASTA entry data"""
self.id = None
self.description = None
self.sequence = None
def write(self):
"""Return FASTA formatted string
Returns:
str: FASTA formatted string containing entire FASTA entry
"""
if self.description:
return '>{0} {1}{3}{2}{3}'.format(self.id,
self.description,
self.sequence,
os.linesep)
else:
return '>{0}{2}{1}{2}'.format(self.id,
self.sequence,
os.linesep)
def fasta_iter(handle, header=None):
"""Iterate over FASTA file and return FASTA entries
Args:
handle (file): FASTA file handle, can be any iterator so long as it
returns subsequent "lines" of a FASTA entry
header (str): Header line of next FASTA entry, if 'handle' has been
partially read and you want to start iterating at the next entry,
read the next FASTA header and pass it to this variable when
calling fasta_iter. See 'Examples.'
Yields:
FastaEntry: class containing all FASTA data
Raises:
IOError: If FASTA entry doesn't start with '>'
Examples:
The following two examples demonstrate how to use fasta_iter.
Note: These doctests will not pass; the examples are only in doctest
format as per convention. bio_utils uses pytest for testing.
>>> for entry in fasta_iter(open('test.fasta')):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
>>> fasta_handle = open('test.fasta')
>>> next(fasta_handle) # Skip first entry header
>>> next(fasta_handle) # Skip first entry sequence
>>> first_line = next(fasta_handle) # Read second entry header
>>> for entry in fasta_iter(fasta_handle, header=first_line):
... print(entry.id) # Print FASTA id
... print(entry.description) # Print FASTA description
... print(entry.sequence) # Print FASTA sequence
... print(entry.write()) # Print full FASTA entry
"""
# Speed tricks: reduces function calls
append = list.append
join = str.join
strip = str.strip
next_line = next
if header is None:
header = next(handle) # Read first FASTA entry header
# Check if input is text or bytestream
if (isinstance(header, bytes)):
def next_line(i):
return next(i).decode('utf-8')
header = strip(header.decode('utf-8'))
else:
header = strip(header)
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
line = strip(next_line(handle))
data = FastaEntry()
try:
if not header[0] == '>':
raise IOError('Bad FASTA format: no ">" at beginning of line')
except IndexError:
raise IOError('Bad FASTA format: file contains blank lines')
try:
data.id, data.description = header[1:].split(' ', 1)
except ValueError: # No description
data.id = header[1:]
data.description = ''
# Obtain sequence
sequence_list = []
while line and not line[0] == '>':
append(sequence_list, line)
line = strip(next_line(handle)) # Raises StopIteration at EOF
header = line # Store current line so it's not lost next iteration
data.sequence = join('', sequence_list)
yield data
except StopIteration: # Yield last FASTA entry
data.sequence = ''.join(sequence_list)
yield data
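# --- Editor's note: the short example below is not part of the original module. ---
# It is a minimal sketch showing the intended round trip: parse a FASTA file with
# fasta_iter() and re-serialize each record with FastaEntry.write(). The input path
# 'example.fasta' is a hypothetical placeholder.
if __name__ == '__main__':
    with open('example.fasta') as handle:  # hypothetical input file
        for entry in fasta_iter(handle):
            # write() already terminates the record with os.linesep
            print(entry.write(), end='')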
|
gpl-3.0
| -4,799,499,336,410,846,000
| 33.294479
| 82
| 0.584258
| false
| 4.329977
| true
| false
| false
|
berrak/cookiecutter-py3starter
|
{{cookiecutter.github_repo_name}}/{{cookiecutter.package_name}}/cli.py
|
1
|
1728
|
#!/usr/bin/env python3
#
# Copyright {{ cookiecutter.author_name }}, {{ cookiecutter.initial_year_to_release }}
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
{{ cookiecutter.package_name }}.cli
-----------------------------------
Entry for the `{{ cookiecutter.package_name }}` CLI.
"""
import sys
import argparse
from {{ cookiecutter.package_name }} import __version__
from {{ cookiecutter.package_name }}.utils.environment import python_version
from {{ cookiecutter.package_name }}.api.greetings import Greetings
from {{ cookiecutter.package_name }}.api.greetings import howdy_greeting
def main(argv=sys.argv):
parser = argparse.ArgumentParser()
parser.add_argument(
"-V, --version", help="show the version and exit", action="version",
version="%(prog)s: version {version} (Python {pyversion})".format(version=__version__, pyversion=python_version()))
parser.add_argument(
"-c, --cowboy", help="cowboy greeting",
action="store_true", dest="iscowboy",
default=False)
args = parser.parse_args()
# Do some meaningful ...
if args.iscowboy:
print(howdy_greeting())
else:
greetings = Greetings()
print(greetings)
return 0
|
apache-2.0
| -237,356,212,394,522,000
| 29.315789
| 123
| 0.681713
| false
| 3.585062
| false
| false
| false
|
roiser/WLCG
|
ssbCvmfsStatus/wlcg-cvmfs4ssb.py
|
1
|
10226
|
#!/usr/bin/env python
import urllib, json, datetime
from xml.parsers import expat
class c4s :
def __init__(self):
self.cvmfsBaseVersionFile = 'cvmfsVersion.txt'
self.requestedVersion = ''
self.myVO = 'LHCb'
self.cvmfsColumnNo = 202
self.wlcgTopoColumnNo = 144
self.topoDict = {'WLCG':{}, self.myVO:{}}
self.ssbTimePat = '%Y-%m-%dT%H:%M:%S'
self.dontpanic = 'http://www.adluge.com/wp-content/uploads/2013/09/homer-simpson-doh.gif'
self.topologyURL = 'http://lhcb-web-dirac.cern.ch/topology/lhcb_topology.xml'
self.wlcgBaseUrl = 'http://wlcg-mon.cern.ch/dashboard/request.py/'
self.wlcgGetUrl = self.wlcgBaseUrl+'getplotdata?columnid=%d&time=24&sites=all&batch=1'
self.wlcgSiteBaseLink = 'http://lhcb-web-dirac.cern.ch/DIRAC/LHCb-Production/undefined/grid/SiteStatus/display?name='
self.ssbMetrics = ['CvmfsVersion','CvmfsRepoRevision','CvmfsMountPoint','CvmfsCondDBMountPoint', 'CvmfsProbeTime', 'CvmfsStratumOnes', 'CvmfsNumSquids', 'CvmfsProbeNoInfo', 'CvmfsProbeLink']
self.ssbData = {}
for k in self.ssbMetrics : self.ssbData[k] = {}
### start probe functions ###
### eval functions ###
def evalCvmfsProbeLink(self, val, site):
return (val, 'green')
def evalCvmfsProbeNoInfo(self, val, site) :
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
if self.ssbData['CvmfsVersion'][site] == 'not installed' : return ('n/a (not installed)', 'grey')
we = val.split(':')[0]
if we == 'WARNING' : return (val, 'orange')
if we == 'ERROR' : return (val, 'red')
return (val, 'green')
def evalCvmfsVersion(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
if val == 'nfs' : return (val, 'green')
if val in ('n/a', 'not installed') : return (val, 'red')
x = 2
maxDiff = range(x+1)
deplV = map(lambda x: int(x), val.split('.'))
reqV = map(lambda x: int(x), self.requestedVersion.split('.'))
if deplV[1] == reqV[1] and deplV[0] == reqV[0] :
if (reqV[2] - deplV[2]) in maxDiff : return (val, 'green')
else : return (val, 'orange')
else : return (val, 'red')
def evalCvmfsRepoRevision(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
if vers in ('nfs', 'not installed') : return ('n/a (%s)'%vers, 'grey')
return (val, 'green')
def evalCvmfsMountPoint(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
if vers in ('not installed') : return ('n/a (%s)'%vers, 'grey')
if val and val == '/cvmfs/lhcb.cern.ch' : return (val, 'green')
else : return (val, 'orange')
def evalCvmfsCondDBMountPoint(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
if self.ssbData['CvmfsVersion'][site] == 'not installed' : return ('n/a (not installed)', 'grey')
if val == 'yes' : return (val, 'orange')
else : return (val, 'green')
def evalCvmfsProbeTime(self, val, site):
if val == 'no probe' : return (val, 'red')
pTime = datetime.datetime.strptime(val,self.ssbTimePat)
curTime = datetime.datetime.now()
delta = (curTime - pTime).seconds
if delta < 21600 : return (val, 'green')
elif delta < 43200 : return (val, 'orange')
else : return (val, 'red')
def evalCvmfsStratumOnes(self, val, site) :
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
if vers in ('nfs', 'not installed') : return ('n/a (%s)'%vers, 'grey')
if val : return (val, 'green')
else: return ('none', 'red')
def evalCvmfsNumSquids(self, val, site):
if self.ssbData['CvmfsProbeTime'][site] == 'no probe' : return ('n/a (no probe)', 'grey')
vers = self.ssbData['CvmfsVersion'][site]
if vers in ('nfs', 'not installed') : return ('n/a (%s)'%vers, 'grey')
if val :
if int(val) > 1 : return (val, 'green')
else : return (val, 'orange')
else: return (val , 'red')
### retrieval functions ###
def getValCvmfsProbeLink(self, site, probe, metric):
self.ssbData['CvmfsProbeLink'][site]=metric['URL']
def getValCvmfsProbeNoInfo(self, site, probe, metric):
val = 'none'
pat = 'INFO: Mandatory tests exectuted successfully, now continuing with testing optional repositories'
for line in probe :
we = line.split(':')[0]
if line[:len(pat)] == pat : break
elif we == 'WARNING' and val.split(':')[0] != 'ERROR' : val = line
elif we == 'ERROR' : val = line
self.ssbData['CvmfsProbeNoInfo'][site] = val
def getValCvmfsVersion(self, site, probe, metric):
pat1 = 'INFO: CVMFS version installed '
pat2 = 'INFO: Mandatory mount point /cvmfs/lhcb.cern.ch is nfs mount point'
pat3 = 'INFO: No cvmfs rpms found on WN, checking if this WN uses nfs mounting of CVMFS repositories'
ver = 'n/a'
noCVMFS = False
cvmfsViaNFS = False
for line in probe :
if line[:len(pat1)] == pat1 :
ver = line[len(pat1):]
elif line[:len(pat2)] == pat2 :
ver = 'nfs'
cvmfsViaNFS = True
elif line[:len(pat3)] == pat3 :
noCVMFS = True
if noCVMFS and not cvmfsViaNFS : ver = 'not installed'
self.ssbData['CvmfsVersion'][site] = ver
def getValCvmfsRepoRevision(self, site, probe, metric):
pat = 'INFO: repository revision '
rev = 'n/a'
for line in probe :
if line[:len(pat)] == pat :
rev = line[len(pat):]
break
self.ssbData['CvmfsRepoRevision'][site] = rev
def getValCvmfsMountPoint(self, site, probe, metric):
pat1 = 'INFO: Variable VO_LHCB_SW_DIR points to CVMFS mount point '
pat2 = 'INFO: Mandatory mount point /cvmfs/lhcb.cern.ch is nfs mount point'
mp = 'n/a'
for line in probe :
if line[:len(pat1)] == pat1 :
mp = line[len(pat1):]
elif line[:len(pat2)] == pat2 :
mp = '/cvmfs/lhcb.cern.ch'
self.ssbData['CvmfsMountPoint'][site] = mp
def getValCvmfsCondDBMountPoint(self, site, probe, metric):
pat = 'INFO: repository /cvmfs/lhcb-conddb.cern.ch available'
cm = 'no'
for line in probe :
if line[:len(pat)] == pat :
cm = 'yes'
self.ssbData['CvmfsCondDBMountPoint'][site] = cm
def getValCvmfsProbeTime(self, site, probe, metric):
self.ssbData['CvmfsProbeTime'][site] = metric['URL'].split('&')[1].split('=')[1][:-1]
# self.ssbData['CvmfsProbeTime'][site] = metric['EndTime']
def getValCvmfsStratumOnes(self, site, probe, metric) :
strats = []
pat = 'INFO: Servers: '
for line in probe :
if line[:len(pat)] == pat :
stratumL = line[len(pat):]
for serv in stratumL.split() :
strats.append('.'.join(serv.split('/')[2].split(':')[0].split('.')[-2:]))
break
self.ssbData['CvmfsStratumOnes'][site] = ' '.join(strats)
def getValCvmfsNumSquids(self, site, probe, metric) :
numSq = 0
pat = 'INFO: Proxies: '
for line in probe :
if line[:len(pat)] == pat :
numSq = len(line[len(pat):].split())
break
self.ssbData['CvmfsNumSquids'][site] = numSq
### end probe functions ####
def xmlStartElement(self, name, attrs):
if name == 'atp_site' : self.currWLCGSite = attrs['name']
if name == 'group' and attrs['type'] == 'LHCb_Site' :
self.topoDict['WLCG'][attrs['name']] = self.currWLCGSite
def bootstrap(self):
# get WLCG Mon mapping VO site name <-> site ID
topo = json.loads(urllib.urlopen(self.wlcgGetUrl%self.wlcgTopoColumnNo).read())
for ent in topo['csvdata'] : self.topoDict[self.myVO][ent['SiteId']] = ent['Status']
# read CVMFS base line version number
f = open(self.cvmfsBaseVersionFile, 'r')
self.requestedVersion = f.read()
f.close()
# read topology file and create mapping VO site name <-> WLCG site name
topo = urllib.urlopen(self.topologyURL).read()
p = expat.ParserCreate()
p.StartElementHandler = self.xmlStartElement
p.Parse(topo)
def clearSsbData(self, site):
for metric in self.ssbMetrics :
self.ssbData[metric][site] = ''
def collectInfo(self):
info = json.loads(urllib.urlopen(self.wlcgGetUrl%self.cvmfsColumnNo).read())
for metricInf in info['csvdata'] :
site = self.topoDict[self.myVO][metricInf['SiteId']]
tTime = datetime.datetime.strptime(metricInf['Time'], self.ssbTimePat)
dTime = self.ssbData['CvmfsProbeTime'].get(site)
if ( not dTime ) or ( datetime.datetime.strptime(dTime, self.ssbTimePat) < tTime ) :
if dTime : self.clearSsbData(site)
tl = urllib.urlopen(self.wlcgBaseUrl+metricInf['URL']).read().split('\n')
for metr in self.ssbMetrics : eval('self.getVal'+metr)(site, tl, metricInf)
for site in self.topoDict['WLCG'].keys() :
if not self.ssbData['CvmfsProbeTime'].get(site) :
for metric in self.ssbMetrics : self.ssbData[metric][site] = ''
self.ssbData['CvmfsProbeTime'][site] = 'no probe'
def writeSSBColumns(self):
for k in self.ssbMetrics :
fun = 'self.eval'+k
colData = self.ssbData[k]
f = open(k+'.ssb.txt', 'w')
for site in colData.keys() :
now = str(datetime.datetime.now())
(val, color) = eval(fun)(colData[site], site)
url = self.dontpanic
if self.ssbData['CvmfsProbeLink'].get(site): url = self.wlcgBaseUrl+self.ssbData['CvmfsProbeLink'][site]
f.write('%s\t%s\t%s\t%s\t%s\n' % (now, site, val, color, url))
f.close()
def createWLCGLHCbMapping(self):
f = open('WLCGSiteMapping.ssb.txt','w')
for site in self.topoDict['WLCG'].keys() :
now = str(datetime.datetime.now())
val = self.topoDict['WLCG'][site]
color = 'white'
url = self.wlcgSiteBaseLink+site
f.write('%s\t%s\t%s\t%s\t%s\n' % (now, site, val, color, url))
def run(self):
self.bootstrap()
self.collectInfo()
self.writeSSBColumns()
self.createWLCGLHCbMapping()
if __name__ == '__main__' :
c4s().run()
|
mit
| 7,568,485,905,248,002,000
| 39.741036
| 194
| 0.622531
| false
| 2.956346
| false
| false
| false
|
arkabytes/abc
|
ABC/migrations/0005_auto_20171023_0929.py
|
1
|
7572
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-23 09:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ABC', '0004_auto_20171023_0924'),
]
operations = [
migrations.AddField(
model_name='company',
name='email',
field=models.EmailField(default=None, max_length=254),
),
migrations.AddField(
model_name='company',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='company',
name='web',
field=models.URLField(default='http://'),
),
migrations.AddField(
model_name='customer',
name='email',
field=models.EmailField(default=None, max_length=254),
),
migrations.AddField(
model_name='customer',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='customer',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='customer',
name='web',
field=models.URLField(default='http://'),
),
migrations.AddField(
model_name='deliverytype',
name='cost',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='deliverytype',
name='days',
field=models.PositiveSmallIntegerField(default=1),
),
migrations.AddField(
model_name='event',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoice',
name='amount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoice',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoice',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='invoice',
name='tax_base',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoice',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoicedetails',
name='discount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='invoicedetails',
name='price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='quantity',
field=models.PositiveSmallIntegerField(default=1),
),
migrations.AddField(
model_name='invoicedetails',
name='subtotal',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='invoicedetails',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='item',
name='cost_price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='item',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='item',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='item',
name='retail_price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='item',
name='stock',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='order',
name='amount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='delivery_cost',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='finished',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='order',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='order',
name='payment_cost',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='tax_base',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='order',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='orderdetails',
name='discount',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='orderdetails',
name='price',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='quantity',
field=models.PositiveSmallIntegerField(default=1),
),
migrations.AddField(
model_name='orderdetails',
name='subtotal',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='orderdetails',
name='vat',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='provider',
name='email',
field=models.EmailField(default=None, max_length=254),
),
migrations.AddField(
model_name='provider',
name='notes',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='provider',
name='postal_code',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='provider',
name='web',
field=models.URLField(default='http://'),
),
migrations.AddField(
model_name='task',
name='description',
field=models.TextField(default=None),
),
migrations.AddField(
model_name='task',
name='notice',
field=models.TextField(default=None),
),
]
|
gpl-3.0
| 5,220,348,303,067,717,000
| 29.288
| 66
| 0.509377
| false
| 4.875724
| false
| false
| false
|
tomhur/domoticz-scripts
|
python/script_time_verisure.py
|
1
|
3621
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import domoticz as d
import sys
import time
sys.path.insert(0, '/opt/python-verisure/')
import verisure
import pickle
import pytz
import urllib3
import certifi
from datetime import datetime
from tzlocal import get_localzone
debug = False
try:
execfile("/etc/domoticz/scripts.conf")
except:
exec(open("/etc/domoticz/scripts.conf").read())
d.log("Getting status from Verisure...")
if int(time.time()) % frequency < 60 :
#Login
try:
f = open(mypagesSession, 'rb')
myPages = pickle.load(f)
f.close()
except:
myPages = verisure.Session(email, verisurepass)
myPages.login()
f = open(mypagesSession, 'wb')
pickle.dump(myPages, f)
f.close()
if debug:
d.log("Loading file failed.")
#Get overview
try:
overview = myPages.get_overview()
except:
myPages = verisure.Session(email, verisurepass)
myPages.login()
f = open(mypagesSession, 'wb')
pickle.dump(myPages, f)
f.close()
overview = myPages.get_overview()
if debug:
d.log("Session was timed out")
#Alarm
status = overview['armState']['statusType']
if debug:
d.log("Verisure Alarm status: ", status )
device = d.devices[atHome]
if status == "DISARMED" or status == "ARMED_HOME":
device.on()
else:
device.off()
#Smartplugs
for i in overview['controlPlugs']:
if debug:
d.log("Verisure Smartplug status for " + i['area'].encode("utf-8","ignore") + ": ", i['currentState'] )
device = d.devices[i['area'].encode("utf-8","ignore")]
if i['currentState'] == "ON":
device.on()
else:
device.off()
#Climate
for i in overview['climateValues']:
device = d.devices[i['deviceArea'].encode("utf-8","ignore")]
domlastupdate = datetime.strptime(device.last_update_string, '%Y-%m-%d %H:%M:%S')
verilastupdate = datetime.strptime(i['time'][:-5], '%Y-%m-%dT%H:%M:%S')
verilastupdate = verilastupdate.replace(tzinfo=pytz.UTC)
verilastupdate = verilastupdate.astimezone(get_localzone())
verilastupdate = verilastupdate.replace(tzinfo=None)
if debug:
d.log("Domoticz last update of " + device.name + ": " + str(domlastupdate))
d.log("Verisure last update of " + device.name + ": " + str(verilastupdate))
if verilastupdate > domlastupdate:
if debug:
d.log("update domoticz climate device " + device.name)
if debug:
d.log("time: " + i['time'] )
d.log("location: " + i['deviceArea'].encode("utf-8","ignore") )
d.log("serial: " + i['deviceLabel'] )
d.log("temperature: " + str(i['temperature']))
if 'humidity' in i:
if debug:
d.log("humidity: " + str(i['humidity']))
if i['humidity'] < 20:
comf = 2
if i['humidity'] >= 20 and i['humidity'] <= 35:
comf = 0
if i['humidity'] > 35 and i['humidity'] <= 75:
comf = 1
if i['humidity'] > 75:
comf = 3
url = baseurl + "type=command¶m=udevice&idx=" + climate[i['deviceArea'].encode("utf-8","ignore")] + "&nvalue=0&svalue=" + str(i['temperature']) + ";" + str(i['humidity']) + ";" + str(comf)
else:
url = baseurl + "type=command¶m=udevice&idx=" + climate[i['deviceArea'].encode("utf-8","ignore")] + "&nvalue=0&svalue=" + str(i['temperature'])
if debug:
d.log('URL: ' + url)
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
r = http.request('GET', url, timeout=2.5)
if debug:
d.log("Status code: " + str(r.status) + "\n" + r.data)
if r.status != 200:
d.log("Error updating temp in Domoticz. HTTP code: " + str(r.status) + " " + r.data)
else:
if debug:
d.log("Only runs every " + str(frequency/60) + " min.")
d.log("done getting status from Verisure")
|
mit
| 8,858,491,424,379,706,000
| 28.680328
| 196
| 0.638498
| false
| 2.772588
| false
| false
| false
|
AFMD/smallProjects
|
nanowire-network-simulations/manningp3plotedit6.py
|
1
|
20377
|
"""
Created on Mon Jun 15 15:42:23 2020
@author: sturdzal
"""
#@title Imports
from shapely.geometry import LineString, MultiLineString, MultiPoint, Point
from shapely.ops import cascaded_union
from scipy.special import comb
from itertools import product
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
import numpy as np
from itertools import islice
from cvxopt import matrix, solvers
from cvxopt.base import sparse
from cvxopt.base import matrix as m
from cvxopt.lapack import *
from cvxopt.blas import *
import cvxopt.misc as misc
#from pykrylov.symmlq import symmlq
#from pykrylov.symmlq import *
#from symmlq import *
#import symmlq
import networkx as nx
from itertools import islice, combinations
from collections import Counter, defaultdict
#from pykrylov.linop import PysparseLinearOperator
#from pykrylov.symmlq import *
import scipy
from scipy.sparse.linalg import *
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import minres
import os.path
import time
import os
import matplotlib.pyplot as plt
import random
from statistics import mean
#------------------Parameter--------------------
R_junc = 1.0 # 100100
#R_junc_list = [1000, 10000, 100000, 10000000, 10000000]
rho0 = 0.314 #0.0790 #0.8 #0.0790 #0.0226
#rho0_list = [0.000314, 0.00314, 0.0314, 0.314, 3.14, 31.4, 314]
wire_diameter = 2 #30.0
wire_length= 1.0 #6.0
extinction_coeff = 4 #0.2
box_length = 5 #15.0 5x wire length gives good results independent of tol for e-9 to e-15
samples = 1
elec_length = box_length
box_y = box_length
lead_sep = box_length
n_min = 0.16411
nstep = 10*n_min
n_initial = 40*n_min #1.90079+30*nstep #0.16411
n_final = 80*n_min #1.90079+31*nstep
percentage_chance = 0.0
distl = False
lower_l = 2.2
upper_l = np.inf
sigmal = 2.0
lmean = wire_length
A0 = math.pi*((wire_diameter*0.001)/2)**2
# End ---------- Parameters block -------------
# ---------- Parameters for symmlq routine -------------
tol=1e-10
show=False
maxit=None
#----------- Parameters for Calculation time display --------
start_time = time.process_time()
# ---------- Output file -------------
res_file = "output2.txt"
if os.path.exists(res_file)==False:
open(res_file, "w").write("Density AF Transmittance Average_resistance resStdev Junct_density R_junc rho0 wire_diameter wire_length box_length samples nstep n_initial n_final tolerance_minres distl lower_l upper_l sigmal junctions_removal calctime\n")
#res_dist = open(res_file,"a")
# ---------- Auxiliary lists for ensemble calculation -------------
res_list=[]
short_sep_list=[]
junc_dens=[]
dens_temp=[]
avg_res_temp=[]
st_dev_temp=[]
resistancelist=[]
transmittancelist=[]
diameterlist=[]
d_counter=0
for wire_diameter in np.arange(1, 4, 2):
transmittancelist.append([])
resistancelist.append([])
diameterlist.append(wire_diameter)
for density in np.arange(n_initial,n_final,nstep):
for sample in range(samples):
while True:
try:
area = box_length**2 # box area (in um^2)
box_x = box_length # box width (in um)
box_y = box_length # box length (in um)
num_junc = 0 # junction counter
nwires = area*density # total number of nanowires
# Start ---------- Creation of random stick coordinates and electrodes -------------
# a single wire is represented by a set of initial and final coordinates as [(x1,y1),(x2,y2)].
x1 = np.random.rand(int(nwires))*box_x
y1 = np.random.rand(int(nwires))*box_y
length_array = np.zeros(int(nwires))
if distl == True:
lengths = stats.truncnorm((lower_l - lmean) / sigmal, (upper_l - lmean) / sigmal, loc=lmean, scale=sigmal)
length_array = lengths.rvs(size=nwires)
else:
length_array.fill(wire_length)
# Sorting the angles that define the wire orientation (in radians from 0 to 2 *pi).
theta1 = np.random.rand(int(nwires))*2.0*math.pi
x2 = length_array * np.cos(theta1) + x1
y2 = length_array * np.sin(theta1) + y1
# Adding to the coordinate list (x1,y1) the points corresponding to the contact leads.
x1 = np.insert(x1, 0, 0.0)
x1 = np.insert(x1, 0,0)
# Adding to the coordinate list (x2,y2) the points corresponding to the contact leads.
x2 = np.insert(x2, 0, 0.0)
x2 = np.insert(x2, 0,0)
ypostop = box_y/2 + elec_length/2
yposbot = box_y/2 - elec_length/2
y1 = np.insert(y1, 0,ypostop)
y1 = np.insert(y1, 0,ypostop)
y2 = np.insert(y2, 0,yposbot)
y2 = np.insert(y2, 0, yposbot)
xposleft = box_x/2-lead_sep/2
xposright = box_x/2+lead_sep/2
x1[0]= xposleft
x2[0] = xposleft
x1[1] = xposright
x2[1] = xposright
# Merging [(x1,y1),(x2,y2)] in accordance to shapely format.
# coords1 = zip(x1,y1)
# coords2 = zip(x2,y2)
# coords = zip(coords1,coords2)
coords1 = list(zip(x1,y1))
coords2 = list(zip(x2,y2))
coords = list(zip(coords1,coords2))
mlines = MultiLineString(coords)
nwires_plus_leads = int(nwires+2)
# End ---------- Creation of random stick coordinates and electrodes -------------
# Start ---------- Identifying intersections between wires -------------
# all pair wire combination
lines_comb = combinations(mlines, 2)
# list storing True or False for pair intersection
intersection_check = [pair[0].intersects(pair[1]) for pair in lines_comb]
# list storing the indexes of intersection_check where the intersection between two wires is TRUE
intersections = [i for i, x in enumerate(intersection_check) if x and random.random() > percentage_chance]
# full list containing all non-repeated combinations of wires
combination_index = list((i,j) for ((i,_),(j,_)) in combinations(enumerate(mlines), 2))
# list storing the connection (wire_i, wire_j)
intersection_index = [combination_index[intersections[i]] for i in range(len(intersections))]
# checking the coordinates for interesection points
inter_point_coll = [pair[0].intersection(pair[1]) for pair in combinations(mlines, 2)]
# eliminating empty shapely points from the previous list
no_empty_inter_point_coll = [inter_point_coll[intersections[i]] for i in range(len(intersections))]
# total number of intersections
nintersections = len(intersection_index)
# End ---------- Identifying intersections between wires -------------
# Start ---------- MNR nodal mapping -------------
# dictionary containing wire index: [list of wires connected to a given wire]
wire_touch_list = defaultdict(list)
for k, v in intersection_index:
wire_touch_list[k].append(v)
wire_touch_list[v].append(k)
# dictionary containing wire index: [label nodes following MNR mapping]
wire_touch_label_list = defaultdict(list)
each_wire_inter_point_storage = defaultdict(list)
label = 2
# Assigning new node labelling according to MNR mapping
for i in iter(wire_touch_list.items()):
for j in range(len(i[1])):
cpoint = mlines[i[0]].intersection(mlines[i[1][j]])
npoint = (cpoint.x,cpoint.y)
each_wire_inter_point_storage[i[0]].append(npoint)
if i[0] > 1:
wire_touch_label_list[i[0]].append(label)
label += 1
else:
wire_touch_label_list[i[0]].append(i[0])
maxl = label # dimension of the resistance matrix
# flattening intersection_index for counting the amount of occurances of wire i
flat = list(sum(intersection_index, ()))
conn_per_wire = Counter(flat)
# checking for isolated wires
complete_list = range(nwires_plus_leads)
isolated_wires = [x for x in complete_list if not x in flat]
# list containing the length segments of each wire (if it has a junction)
each_wire_length_storage = [[] for _ in range(nwires_plus_leads)]
# Routine that obtains the segment lengths on each wire
for i in each_wire_inter_point_storage:
point_ini = Point(mlines[i].coords[0])
point_fin = Point(mlines[i].coords[1])
wlength = point_ini.distance(point_fin)
wire_points = each_wire_inter_point_storage[i]
dist = [0.0]*(len(wire_points)+1)
for j in range(len(wire_points)):
point = Point(wire_points[j])
dist[j] = point_ini.distance(point)
dist[-1] = wlength
dist.sort()
dist_sep = [0.0]*len(dist)
dist_sep[0] = dist[0]
dist_sep[1:len(dist)] = [dist[k]-dist[k-1] for k in range(1,len(dist))]
each_wire_length_storage[i].append(dist_sep)
# End ---------- MNR nodal mapping -------------
# The MNR mapping associated to the NWN is also converted into a mathematical graph given by G.
# G contains 2*nintersections nodes and, by convention, the left and right electrodes are labelled as nodes 0 and 1, respectively.
G = nx.Graph()
G.add_nodes_from(range(2*nintersections))
mr_matrix_plus = np.zeros((2*nintersections,2*nintersections))
inner_count = 0
inter_count = 0
#nx.draw(G)
#nx.draw_random(G)
#nx.draw_circular(G)
nx.draw_spectral(G, node_size= 10)
##nx.draw_networkx_nodes(G)
plt.show()
# Start ---------- Building resistance matrix -------------
for iwire in range(nwires_plus_leads):
if each_wire_inter_point_storage[iwire]:
for j, pointj in enumerate(each_wire_inter_point_storage[iwire]):
point = Point(pointj)
for i, pointw in enumerate(each_wire_inter_point_storage[iwire]):
comp_pointw = Point(pointw)
inter_dist = point.distance(comp_pointw)
round_inter_dist = round(inter_dist, 4)
for il in each_wire_length_storage[iwire][0]:
value = float(il)
value = round(value,4)
if value == round_inter_dist and value != 0:
inner_resis = (float(value) * rho0 / A0)
if iwire != 0 and iwire != 1 and mr_matrix_plus[wire_touch_label_list[iwire][i], wire_touch_label_list[iwire][j]] == 0.0:
mr_matrix_plus[wire_touch_label_list[iwire][i], wire_touch_label_list[iwire][j]] = -1.0/inner_resis
mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[iwire][i]] = -1.0/inner_resis
G.add_edge(wire_touch_label_list[iwire][i],wire_touch_label_list[iwire][j])
inner_count += 1
for k, label in enumerate(wire_touch_list[iwire]):
for kk, pointk in enumerate(each_wire_inter_point_storage[label]):
pointk = Point(pointk)
inter_dist = point.distance(pointk)
round_inter_dist = round(inter_dist, 4)
if round_inter_dist == 0 and mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[label][kk]] == 0:
G.add_edge(wire_touch_label_list[label][kk],wire_touch_label_list[iwire][j])
r0 = -1/R_junc
mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[label][kk]] = r0
mr_matrix_plus[wire_touch_label_list[label][kk], wire_touch_label_list[iwire][j]] = r0
sum_rows_mr_plus = mr_matrix_plus.sum(1)
np.fill_diagonal(mr_matrix_plus, abs(sum_rows_mr_plus))
mr_nozero_rows_plus = mr_matrix_plus[~(mr_matrix_plus==0).all(1),:]
# nonconnected wires are eliminated from the resistance matrix
mr_nonconnected_plus = mr_nozero_rows_plus[:,~(mr_nozero_rows_plus==0).all(0)]
# End ---------- Building resistance matrix -------------
# input current vector
i0 = 1.0 # absolute value of the current (in Amp)
ic = np.zeros(mr_nonconnected_plus.shape[0])
ic[0] = +i0
ic[1] = -i0
Imatrix = m(ic)
# Solving Ohm's law in matrix form, R^(-1)V = I. Resulting voltages are in Volts.
#Amatrix = m(mr_nonconnected_plus)
#Amatrix = np.array(mr_nonconnected_plus)
#ks = Symmlq(Imatrix)
#elec_pot_mr = ks.solve(Gfun)
#print Gfun
#print Imatrix
#or
#ks = Symmlq(Gfun)
#print Amatrix
#elec_pot_mr = ks.solve(Imatrix)
Amatrix = csc_matrix(mr_nonconnected_plus)
elec_pot_mr = minres(Amatrix, Imatrix, tol=tol)
#elec_pot_mr = Symmlq(Imatrix, Gfun, show=show, rtol=tol, maxit=maxit)
#elec_pot_mr = minres(Imatrix, Amatrix)
# Sheet resistance
resistance = ((elec_pot_mr[0][0] - elec_pot_mr[0][1]))/i0
# Checking if there is a path connecting electrodes at nodes 0 and 1
if nx.has_path(G,0,1):
separation_short = nx.shortest_path_length(G,0,1)
res_list.append(resistance)
short_sep_list.append(separation_short)
junc_dens.append(float(nintersections)/area)
except IndexError:
continue
break
AF = density*wire_diameter*wire_length*0.001
transmittance = round(math.exp(-AF*extinction_coeff), 4)
junc_avg = np.mean(junc_dens)
resAvg = np.mean(res_list)
resStd = np.std(res_list)
short = np.mean(short_sep_list)
dens_temp.append(junc_avg)
avg_res_temp.append(resAvg)
st_dev_temp.append(resStd)
open(res_file,"a").write("%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n" %(density,AF,transmittance,resAvg,resStd,junc_avg,R_junc,rho0,wire_diameter,wire_length,box_length,samples,nstep,n_initial,n_final,tol,distl,lower_l,upper_l,sigmal,percentage_chance,round(time.process_time() - start_time, 5)))
print("Density: %s, Transmittance: %s, Average resistance: %s, Standard deviation: %s, Junction density: %s, Junctions removed: %s" %(density,transmittance,round(resAvg, 6),round(resStd, 4),round(junc_avg, 4), percentage_chance))
print("runtime was", round(time.process_time() - start_time, 5), "seconds")
#remove 'nan' data points from arrays to avoid curve fit errors
if not (np.isnan(transmittance) or np.isnan(resAvg)):
transmittancelist[d_counter].append(transmittance)
resistancelist[d_counter].append(resAvg)
res_list=[]
short_sep_list=[]
junc_dens=[]
d_counter=d_counter+1
print(transmittancelist)
print(resistancelist)
print(diameterlist)
for j in np.arange(0,d_counter,1):
transmittancelist[j]=np.array(transmittancelist[j], dtype=np.float64)
resistancelist[j]=np.array(resistancelist[j], dtype=np.float64)
#T vs Rs plot and fit
from scipy.optimize import curve_fit
Z0=377
def T_perc_func(r, p, n):
return (1+(1/p)*((Z0/r)**(1/(1+n))))**(-2)
def T_bulk_func(r,sratio):
return (1+(sratio*Z0/(2*r)))**(-2)
#may need to adjust if further colors necessary
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
for i in np.arange(0,d_counter,1):
popt_perc, pcov_perc = curve_fit(T_perc_func, resistancelist[i], transmittancelist[i])
popt_perc
popt_bulk, pcov_bulk = curve_fit(T_bulk_func, resistancelist[i], transmittancelist[i])
popt_bulk
resistancelist_srt=np.sort(resistancelist[i])
#print(resistancelist_srt)
reslistlength=len(resistancelist_srt)
res_start=resistancelist_srt[0]
res_end=resistancelist_srt[reslistlength-1]
res_step= (res_end - res_start)/25
print(res_start, res_end, res_step)
resfitlist=[]
for j in np.arange(res_start,res_end + (res_step/2),res_step):
resfitlist.append(j)
#print(resfitlist)
resfitlist=np.array(resfitlist, dtype=np.float64)
plotcolor=colors[i]
plt.plot(resfitlist, T_perc_func(resfitlist, *popt_perc), plotcolor, linestyle='-', label='Percolative fit: \u03A0 =%5.3f, n=%5.3f' % tuple(popt_perc))
plt.plot(resfitlist, T_bulk_func(resfitlist, *popt_bulk), plotcolor, linestyle='--', label='Bulk fit: \u03C3 ratio=%5.3f' % tuple(popt_bulk))
plt.plot(resistancelist[i], transmittancelist[i], plotcolor, marker='o', linestyle='None', label='diameter=%s nm' %(diameterlist[i]))
plt.title('T vs Rs')
plt.ylabel('T')
plt.xlabel('Rs (Ohm/sq) - Log scale')
plt.xscale('log')
leg = plt.legend(loc='best', ncol=2, mode="expand", shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.show()
#Convert T to T^(-1/2)-1 and take log of both arrays
for j in np.arange(0,d_counter,1):
transmittancelist[j]=np.log((transmittancelist[j]**(-1/2))-1)
resistancelist[j]=np.log(resistancelist[j])
#print(transmittancelist)
#print(resistancelist)
def best_fit_slope_and_intercept(xs,ys):
m = (((mean(xs)*mean(ys)) - mean(xs*ys)) /
((mean(xs)*mean(xs)) - mean(xs*xs)))
b = mean(ys) - m*mean(xs)
return m, b
#line fit and plot data on log scale
for i in np.arange(0,d_counter,1):
m, b = best_fit_slope_and_intercept(resistancelist[i],transmittancelist[i])
print(m,b)
#plot best fit line on graph
regression_line = [(m*x)+b for x in resistancelist[i]]
plotcolor=colors[i]
plt.plot(resistancelist[i],transmittancelist[i], plotcolor, marker= 'o', linestyle='None', label='diameter=%s nm' %(diameterlist[i]))
plt.plot(resistancelist[i], regression_line, plotcolor, linestyle='-', label='Line Fit y = %s x + %s' %(round(m,3),round(b,3)))
plt.title('Log(T^(-1/2)-1) vs Log(Rs) with Line Fit')
plt.ylabel('Log(T^(-1/2)-1)')
plt.xlabel('Log(Rs)')
leg = plt.legend(loc='best', ncol=2, mode="expand", shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.show()
open(res_file,"a").close()
duration = 0.1
freq = 1100
|
gpl-2.0
| -3,190,882,586,409,944,000
| 40.416667
| 327
| 0.551112
| false
| 3.36977
| false
| false
| false
|
rbuffat/pyidf
|
tests/test_zonemixing.py
|
1
|
5372
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.zone_airflow import ZoneMixing
log = logging.getLogger(__name__)
class TestZoneMixing(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_zonemixing(self):
pyidf.validation_level = ValidationLevel.error
obj = ZoneMixing()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# alpha
var_design_flow_rate_calculation_method = "Flow/Zone"
obj.design_flow_rate_calculation_method = var_design_flow_rate_calculation_method
# real
var_design_flow_rate = 0.0
obj.design_flow_rate = var_design_flow_rate
# real
var_flow_rate_per_zone_floor_area = 0.0
obj.flow_rate_per_zone_floor_area = var_flow_rate_per_zone_floor_area
# real
var_flow_rate_per_person = 0.0
obj.flow_rate_per_person = var_flow_rate_per_person
# real
var_air_changes_per_hour = 0.0
obj.air_changes_per_hour = var_air_changes_per_hour
# object-list
var_source_zone_name = "object-list|Source Zone Name"
obj.source_zone_name = var_source_zone_name
# real
var_delta_temperature = 10.1
obj.delta_temperature = var_delta_temperature
# object-list
var_delta_temperature_schedule_name = "object-list|Delta Temperature Schedule Name"
obj.delta_temperature_schedule_name = var_delta_temperature_schedule_name
# object-list
var_minimum_zone_temperature_schedule_name = "object-list|Minimum Zone Temperature Schedule Name"
obj.minimum_zone_temperature_schedule_name = var_minimum_zone_temperature_schedule_name
# object-list
var_maximum_zone_temperature_schedule_name = "object-list|Maximum Zone Temperature Schedule Name"
obj.maximum_zone_temperature_schedule_name = var_maximum_zone_temperature_schedule_name
# object-list
var_minimum_source_zone_temperature_schedule_name = "object-list|Minimum Source Zone Temperature Schedule Name"
obj.minimum_source_zone_temperature_schedule_name = var_minimum_source_zone_temperature_schedule_name
# object-list
var_maximum_source_zone_temperature_schedule_name = "object-list|Maximum Source Zone Temperature Schedule Name"
obj.maximum_source_zone_temperature_schedule_name = var_maximum_source_zone_temperature_schedule_name
# object-list
var_minimum_outdoor_temperature_schedule_name = "object-list|Minimum Outdoor Temperature Schedule Name"
obj.minimum_outdoor_temperature_schedule_name = var_minimum_outdoor_temperature_schedule_name
# object-list
var_maximum_outdoor_temperature_schedule_name = "object-list|Maximum Outdoor Temperature Schedule Name"
obj.maximum_outdoor_temperature_schedule_name = var_maximum_outdoor_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.zonemixings[0].name, var_name)
self.assertEqual(idf2.zonemixings[0].zone_name, var_zone_name)
self.assertEqual(idf2.zonemixings[0].schedule_name, var_schedule_name)
self.assertEqual(idf2.zonemixings[0].design_flow_rate_calculation_method, var_design_flow_rate_calculation_method)
self.assertAlmostEqual(idf2.zonemixings[0].design_flow_rate, var_design_flow_rate)
self.assertAlmostEqual(idf2.zonemixings[0].flow_rate_per_zone_floor_area, var_flow_rate_per_zone_floor_area)
self.assertAlmostEqual(idf2.zonemixings[0].flow_rate_per_person, var_flow_rate_per_person)
self.assertAlmostEqual(idf2.zonemixings[0].air_changes_per_hour, var_air_changes_per_hour)
self.assertEqual(idf2.zonemixings[0].source_zone_name, var_source_zone_name)
self.assertAlmostEqual(idf2.zonemixings[0].delta_temperature, var_delta_temperature)
self.assertEqual(idf2.zonemixings[0].delta_temperature_schedule_name, var_delta_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].minimum_zone_temperature_schedule_name, var_minimum_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].maximum_zone_temperature_schedule_name, var_maximum_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].minimum_source_zone_temperature_schedule_name, var_minimum_source_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].maximum_source_zone_temperature_schedule_name, var_maximum_source_zone_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].minimum_outdoor_temperature_schedule_name, var_minimum_outdoor_temperature_schedule_name)
self.assertEqual(idf2.zonemixings[0].maximum_outdoor_temperature_schedule_name, var_maximum_outdoor_temperature_schedule_name)
|
apache-2.0
| 8,416,077,038,472,639,000
| 51.676471
| 142
| 0.706627
| false
| 3.610215
| false
| false
| false
|
applied-mixnetworks/txmix
|
txmix/udp_transport.py
|
1
|
1452
|
from __future__ import print_function
import attr
from zope.interface import implementer
from twisted.internet.interfaces import IReactorUDP
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import defer
from txmix import IMixTransport
@implementer(IMixTransport)
@attr.s()
class UDPTransport(DatagramProtocol, object):
"""
implements the IMixTransport interface
"""
name = "udp"
reactor = attr.ib(validator=attr.validators.provides(IReactorUDP))
addr = attr.ib(validator=attr.validators.instance_of(tuple))
def register_protocol(self, protocol):
# XXX todo: assert that protocol provides the appropriate interface
self.protocol = protocol
def start(self):
"""
make this transport begin listening on the specified interface and UDP port
interface must be an IP address
"""
interface, port = self.addr
self.reactor.listenUDP(port, self, interface=interface)
return defer.succeed(None)
def send(self, addr, message):
"""
send message to addr
where addr is a 2-tuple of type: (ip address, UDP port)
"""
self.transport.write(message, addr)
return defer.succeed(None)
def datagramReceived(self, datagram, addr):
"""
i am called by the twisted reactor when our transport receives a UDP packet
"""
self.protocol.received(datagram)
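# --- Editor's note: the wiring sketch below is not part of the original module. ---
# It shows how UDPTransport is meant to be used: build it with a reactor and a
# (host, port) tuple, register a protocol object exposing received(datagram), then
# call start(). EchoProtocol and the port number are hypothetical stand-ins.
if __name__ == '__main__':
    from twisted.internet import reactor

    class EchoProtocol(object):
        """Hypothetical protocol; txmix would supply a real one."""
        def received(self, datagram):
            print("received %d bytes" % len(datagram))

    transport = UDPTransport(reactor=reactor, addr=('127.0.0.1', 9000))
    transport.register_protocol(EchoProtocol())
    transport.start()  # begins listening via reactor.listenUDP
    reactor.run()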
|
gpl-3.0
| 8,444,479,411,809,278,000
| 29.25
| 83
| 0.683196
| false
| 4.4
| false
| false
| false
|
catapult-project/catapult
|
telemetry/telemetry/internal/platform/network_controller_backend.py
|
3
|
7521
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
from telemetry.internal.util import webpagereplay_go_server
from telemetry.internal.util import ts_proxy_server
from telemetry.util import wpr_modes
class ArchiveDoesNotExistError(Exception):
"""Raised when the archive path does not exist for replay mode."""
pass
class ReplayAndBrowserPortsError(Exception):
"""Raised an existing browser would get different remote replay ports."""
pass
class NetworkControllerBackend(object):
"""Control network settings and servers to simulate the Web.
Network changes include forwarding device ports to host platform ports.
Web Page Replay is used to record and replay HTTP/HTTPS responses.
"""
def __init__(self, platform_backend):
self._platform_backend = platform_backend
# Controller options --- bracketed by Open/Close
self._wpr_mode = None
# Replay options --- bracketed by StartReplay/StopReplay
self._archive_path = None
self._make_javascript_deterministic = None
self._extra_wpr_args = None
# Network control services
self._ts_proxy_server = None
self._forwarder = None
self._wpr_server = None
def Open(self, wpr_mode):
"""Get the target platform ready for network control.
This will both start a TsProxy server and set up a forwarder to it.
If options are compatible and the controller is already open, it will
try to re-use the existing server and forwarder.
After network interactions are over, clients should call the Close method.
Args:
wpr_mode: a mode for web page replay; available modes are
wpr_modes.WPR_OFF, wpr_modes.APPEND, wpr_modes.WPR_REPLAY, or
wpr_modes.WPR_RECORD. Setting wpr_modes.WPR_OFF configures the
network controller to use live traffic.
"""
if self.is_open:
use_live_traffic = wpr_mode == wpr_modes.WPR_OFF
if self.use_live_traffic != use_live_traffic:
self.Close() # Need to restart the current TsProxy and forwarder.
else:
if self._wpr_mode != wpr_mode:
self.StopReplay() # Need to restart the WPR server, if any.
self._wpr_mode = wpr_mode
return
self._wpr_mode = wpr_mode
try:
local_port = self._StartTsProxyServer()
self._forwarder = self._platform_backend.forwarder_factory.Create(
local_port=local_port, remote_port=None)
except Exception:
self.Close()
raise
@property
def is_open(self):
return self._ts_proxy_server is not None
@property
def use_live_traffic(self):
return self._wpr_mode == wpr_modes.WPR_OFF
@property
def host_ip(self):
return self._platform_backend.forwarder_factory.host_ip
def Close(self):
"""Undo changes in the target platform used for network control.
Implicitly stops replay if currently active.
"""
self.StopReplay()
self._StopForwarder()
self._StopTsProxyServer()
self._wpr_mode = None
def StartReplay(self, archive_path, make_javascript_deterministic,
extra_wpr_args):
"""Start web page replay from a given replay archive.
Starts as needed, and reuses if possible, the replay server on the host.
Implementation details
----------------------
The local host is where Telemetry is run. The remote is host where
the target application is run. The local and remote hosts may be
the same (e.g., testing a desktop browser) or different (e.g., testing
an android browser).
A replay server is started on the local host using the local ports, while
a forwarder ties the local to the remote ports.
Both local and remote ports may be zero. In that case they are determined
by the replay server and the forwarder respectively. Setting dns to None
disables DNS traffic.
Args:
archive_path: a path to a specific WPR archive.
make_javascript_deterministic: True if replay should inject a script
to make JavaScript behave deterministically (e.g., override Date()).
extra_wpr_args: a tuple with any extra args to send to the WPR server.
"""
assert self.is_open, 'Network controller is not open'
if self.use_live_traffic:
return
if not archive_path:
# TODO(slamm, tonyg): Ideally, replay mode should be stopped when there is
# no archive path. However, if the replay server already started, and
# a file URL is tested with the
# telemetry.core.local_server.LocalServerController, then the
# replay server forwards requests to it. (Chrome is configured to use
# fixed ports for all HTTP/HTTPS requests.)
return
if (self._wpr_mode == wpr_modes.WPR_REPLAY and
not os.path.exists(archive_path)):
raise ArchiveDoesNotExistError(
'Archive path does not exist: %s' % archive_path)
if (self._wpr_server is not None and
self._archive_path == archive_path and
self._make_javascript_deterministic == make_javascript_deterministic and
self._extra_wpr_args == extra_wpr_args):
return # We may reuse the existing replay server.
self._archive_path = archive_path
self._make_javascript_deterministic = make_javascript_deterministic
self._extra_wpr_args = extra_wpr_args
local_ports = self._StartReplayServer()
self._ts_proxy_server.UpdateOutboundPorts(
http_port=local_ports['http'], https_port=local_ports['https'])
def StopReplay(self):
"""Stop web page replay.
Stops the replay server if currently active.
"""
self._StopReplayServer()
self._archive_path = None
self._make_javascript_deterministic = None
self._extra_wpr_args = None
def _StartReplayServer(self):
"""Start the replay server and return the started local_ports."""
self._StopReplayServer() # In case it was already running.
self._wpr_server = webpagereplay_go_server.ReplayServer(
self._archive_path,
self.host_ip,
http_port=0,
https_port=0,
replay_options=self._ReplayCommandLineArgs())
return self._wpr_server.StartServer()
def _StopReplayServer(self):
"""Stop the replay server only."""
if self._wpr_server:
self._wpr_server.StopServer()
self._wpr_server = None
def _StopForwarder(self):
if self._forwarder:
self._forwarder.Close()
self._forwarder = None
def _StopTsProxyServer(self):
"""Stop the replay server only."""
if self._ts_proxy_server:
self._ts_proxy_server.StopServer()
self._ts_proxy_server = None
def _ReplayCommandLineArgs(self):
wpr_args = list(self._extra_wpr_args)
if self._wpr_mode == wpr_modes.WPR_APPEND:
wpr_args.append('--append')
elif self._wpr_mode == wpr_modes.WPR_RECORD:
wpr_args.append('--record')
if not self._make_javascript_deterministic:
wpr_args.append('--inject_scripts=')
return wpr_args
def _StartTsProxyServer(self):
assert not self._ts_proxy_server, 'ts_proxy_server is already started'
host_ip = None if self.use_live_traffic else self.host_ip
self._ts_proxy_server = ts_proxy_server.TsProxyServer(host_ip=host_ip)
self._ts_proxy_server.StartServer()
return self._ts_proxy_server.port
@property
def forwarder(self):
return self._forwarder
@property
def ts_proxy_server(self):
return self._ts_proxy_server
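# --- Editor's note: the sketch below is not part of the original module. ---
# It only illustrates the call order documented above: Open()/Close() bracket the
# controller options, while StartReplay()/StopReplay() bracket the replay options.
# `platform_backend` and `archive_path` are hypothetical inputs that telemetry's
# platform layer would normally provide.
def _example_replay_session(platform_backend, archive_path):
    controller = NetworkControllerBackend(platform_backend)
    controller.Open(wpr_modes.WPR_REPLAY)
    try:
        controller.StartReplay(archive_path,
                               make_javascript_deterministic=True,
                               extra_wpr_args=())
        # ... run the browser against the recorded archive here ...
        controller.StopReplay()
    finally:
        controller.Close()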
|
bsd-3-clause
| -3,696,970,511,087,998,500
| 33.819444
| 80
| 0.68568
| false
| 3.866838
| false
| false
| false
|
felipenaselva/felipe.repository
|
script.module.placenta/lib/resources/lib/sources/fr/filmenstreaminghd.py
|
1
|
6831
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re, urllib, urlparse, base64, json, unicodedata
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
def __init__(self):
self.priority = 1
self.language = ['fr']
self.domains = ['filmenstreaminghd.co']
#http://dpstreaming.tv/?s=max+steel
#http://dpstreaming.tv/max-steel-vostfr-streaming-telecharger/']
self.base_link = 'http://www.filmenstreaminghd.co'
self.key_link = '?'
self.moviesearch_link = 's=%s'
self.tvsearch_link = 's=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
print '------------------------------- -------------------------------'
sources = []
print url
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
season = data['season'] if 'season' in data else False
episode = data['episode'] if 'episode' in data else False
print season, episode
if season and episode:
print 'TV'
self.search_link = 'query=%s&submit=Submit+Query'
aTitle = data['tvshowtitle']
else:
self.search_link = 'query=%s&submit=Submit+Query'
aTitle = data['title']
post = self.search_link % (urllib.quote_plus(cleantitle.query(aTitle)))
url = 'http://www.filmenstreaminghd.com/recherche/'
t = cleantitle.get(aTitle)
r = client.request(url, XHR=True, referer=url, post=post)
r = client.parseDOM(r, 'div', attrs={'class': 'film-k kutu-icerik kat'})
if season and episode:
t = t + 'saison0' + season
r = client.parseDOM(r, 'div', attrs={'class': 'play fa fa-play-circle'})
r = sorted(set(r))
r = [(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1])][0]
#r = sorted(set(r))
url0 = '%s%s' % ('http://www.filmenstreaminghd.com' , r)
print url0
url = client.replaceHTMLCodes(url0)
url = url0.encode('utf-8')
r = client.request(url, XHR=True, referer=url)
r = re.sub('(\n|\t)', '', r)
langue = re.compile('<b class=\"fa fa-cc\"></b><span>(.+?)</span>', re.MULTILINE | re.DOTALL).findall(r)[0]
if langue == 'VF':
langue = 'FR'
quality2 = re.compile('<div class=\"kalite\">(.+?)</div>', re.MULTILINE | re.DOTALL).findall(r)[0]
quality2 = re.sub('-', '', quality2)
if season and episode:
unLien0a = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri'})[0]
r = re.compile('Saison\s+0%s\s+\-\s+Episode\s+0%s(.+?)class=\"dropit-trigger\">' % (season, episode), re.MULTILINE | re.DOTALL).findall(unLien0a)[0]
unLien0b = client.parseDOM(r, 'li', ret='id')
else:
r = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri film'})
unLien0b = client.parseDOM(r, 'span', ret='id')
counter = 0
for unLienUrl in unLien0b:
if 'gf-' in unLienUrl:
continue
dataUrl = urllib.urlencode({'pid': unLienUrl[1:]})
dataUrl = client.request(url0, post=dataUrl, XHR=True, referer=url0)
try:
url = client.parseDOM(dataUrl, 'iframe', ret='src')[1]
except:
url = client.parseDOM(dataUrl, 'iframe', ret='src')[0]
if url.startswith('//'):
url = url.replace('//', '', 1)
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: continue
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
url = url.encode('utf-8')
if '1080p' in quality2:
quality = '1080p'
elif '720p' in quality2 or 'bdrip' in quality2 or 'hdrip' in quality2:
quality = 'HD'
else:
quality = 'SD'
if 'dvdscr' in quality2 or 'r5' in quality2 or 'r6' in quality2:
quality2 = 'SCR'
elif 'camrip' in quality2 or 'tsrip' in quality2 or 'hdcam' in quality2 or 'hdts' in quality2 or 'dvdcam' in quality2 or 'dvdts' in quality2 or 'cam' in quality2 or 'telesync' in quality2 or 'ts' in quality2:
quality2 = 'CAM'
sources.append({'source': host, 'quality': quality, 'language': langue, 'url': url, 'direct': False, 'debridonly': False})
print sources
return sources
except:
return sources
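    # Informational summary of the quality mapping used above (not upstream
    # text): the page's "kalite" label is reduced to '1080p' when it contains
    # 1080p, to 'HD' for 720p/bdrip/hdrip, and to 'SD' otherwise; a second pass
    # tags screeners ('dvdscr', 'r5', 'r6') and cam/telesync rips, but only the
    # first value ends up in the 'quality' field of each source dictionary.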
def resolve(self, url):
return url
|
gpl-2.0
| 2,041,355,159,829,254,000
| 36.740331
| 224
| 0.495389
| false
| 3.792893
| false
| false
| false
|
vamdt/spider
|
douban/pics.py
|
1
|
1638
|
# coding=utf-8
import re
import urllib
import json
import os, random
BASE_DOWN_DIR = './download'
BASE_DOWN_POSTS_DIR = BASE_DOWN_DIR + '/posts'
BASE_URL = 'http://www.douban.com/photos/photo/2230938262/'
class AppURLopener(urllib.FancyURLopener):
version = "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.4 Safari/537.36"
urllib._urlopener = AppURLopener()
def main():
    i = 0
    url = BASE_URL
    while i < 3:
        i = i + 1
        url = play(url, i)
def play(url, index):
f = urllib.urlopen(url)
html = f.read()
print html
pattern = re.compile(u'<a href="(http://www.douban.com/photos/photo/\d+/#image)" title=".+" id="next_photo">.+</a>',re.DOTALL)
url = pattern.findall(html)[0]
p2 = re.compile(u'<a class="mainphoto" href="\S+" title="\S+">\s+<img src="(http://img.+\.douban\.com/view/photo/photo/public/.+\.jpg)" />\s+</a>', re.DOTALL)
img_url = p2.findall(html)[0]
print img_url
create_dirs(BASE_DOWN_POSTS_DIR)
save_posts(img_url, index)
return url
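# Illustrative note (not part of the original script): each call to play()
# fetches one photo page, regex-scrapes the "next photo" link and the full-size
# image URL out of the raw HTML, saves the image under ./download/posts/<index>.jpg
# and returns the next page URL for the following iteration. Both regular
# expressions are tied to Douban's markup at the time the script was written
# and will likely need updating if the page layout changes.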
def get_html(url):
return urllib.urlopen(url).read()
def create_dirs(dir_name):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def save_posts(url, index):
html = get_html(url)
file_name = BASE_DOWN_POSTS_DIR + '/' + str(index) + '.jpg'
save( html, file_name)
def save(obj, name):
    # use a context manager so the file handle is always closed after writing
    with open(name, 'w') as out_file:
        out_file.write(str(obj))
def save_as_json(obj, name):
json_data = json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
save(json_data, name)
if __name__ == '__main__':
main()
|
mit
| 6,337,651,235,404,347,000
| 25.015873
| 162
| 0.616606
| false
| 2.762226
| false
| false
| false
|
ewindisch/nova
|
nova/compute/manager.py
|
1
|
251375
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import contextlib
import functools
import socket
import sys
import time
import traceback
import uuid
import eventlet.event
from eventlet import greenthread
import eventlet.timeout
from oslo.config import cfg
from oslo import messaging
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova import consoleauth
import nova.context
from nova import exception
from nova import hooks
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import external_event as external_event_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_group as instance_group_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import paths
from nova import rpc
from nova import safe_utils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
from nova.volume import encryptors
compute_opts = [
cfg.StrOpt('console_host',
default=socket.gethostname(),
help='Console proxy host to use to connect '
'to instances on this host.'),
cfg.StrOpt('default_access_ip_network_name',
help='Name of network to use to set access IPs for instances'),
cfg.BoolOpt('defer_iptables_apply',
default=False,
help='Whether to batch up the application of IPTables rules'
' during a host restart and apply all at the end of the'
' init phase'),
cfg.StrOpt('instances_path',
default=paths.state_path_def('instances'),
help='Where instances are stored on disk'),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="Generate periodic compute.instance.exists"
" notifications"),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
cfg.BoolOpt('resume_guests_state_on_host_boot',
default=False,
help='Whether to start guests that were running before the '
'host rebooted'),
cfg.IntOpt('network_allocate_retries',
default=0,
help="Number of times to retry network allocation on failures"),
]
interval_opts = [
cfg.IntOpt('bandwidth_poll_interval',
default=600,
help='Interval to pull network bandwidth usage info. Not '
'supported on all hypervisors. Set to 0 to disable.'),
cfg.IntOpt('sync_power_state_interval',
default=600,
help='Interval to sync power states between '
'the database and the hypervisor'),
cfg.IntOpt("heal_instance_info_cache_interval",
default=60,
help="Number of seconds between instance info_cache self "
"healing updates"),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help='Interval in seconds for reclaiming deleted instances'),
cfg.IntOpt('volume_usage_poll_interval',
default=0,
help='Interval in seconds for gathering volume usages'),
cfg.IntOpt('shelved_poll_interval',
default=3600,
help='Interval in seconds for polling shelved instances to '
'offload'),
cfg.IntOpt('shelved_offload_time',
default=0,
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
'when shelved'),
cfg.IntOpt('instance_delete_interval',
default=300,
help=('Interval in seconds for retrying failed instance file '
'deletes'))
]
timeout_opts = [
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
"stuck in a rebooting state longer than N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("instance_build_timeout",
default=0,
help="Amount of time in seconds an instance can be in BUILD "
"before going into ERROR status."
"Set to 0 to disable."),
cfg.IntOpt("rescue_timeout",
default=0,
help="Automatically unrescue an instance after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("resize_confirm_window",
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
]
running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="reap",
help="Action to take if a running deleted instance is detected."
"Valid options are 'noop', 'log', 'shutdown', or 'reap'. "
"Set to 'noop' to take no action."),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=1800,
help="Number of seconds to wait between runs of the cleanup "
"task."),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
"instance should be considered eligible for cleanup."),
]
instance_cleaning_opts = [
cfg.IntOpt('maximum_instance_delete_attempts',
default=5,
help=('The number of times to attempt to reap an instance\'s '
'files.')),
]
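# Illustrative sketch (not part of the upstream module): the option groups
# above are registered without an explicit group, so an operator would set
# them in the [DEFAULT] section of nova.conf, for example:
#
#     [DEFAULT]
#     resume_guests_state_on_host_boot = True
#     running_deleted_instance_action = reap
#     reclaim_instance_interval = 600
#
# The values shown are illustrative, not recommendations.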
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_opts(interval_opts)
CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.register_opts(instance_cleaning_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('enabled', 'nova.spice', group='spice')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.rdp', group='rdp')
CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp')
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
def errors_out_migration(function):
"""Decorator to error out migration on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
# Find migration argument. The argument cannot be
# defined by position because the wrapped functions
# do not have the same signature.
for arg in args:
if not isinstance(arg, migration_obj.Migration):
continue
status = arg.status
if status not in ['migrating', 'post-migrating']:
continue
arg.status = 'error'
try:
arg.save(context.elevated())
except Exception:
LOG.debug(_('Error setting migration status '
'for instance %s.') %
arg.instance_uuid, exc_info=True)
break
return decorated_function
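# Hypothetical usage sketch (not taken from this file): a manager method
# wrapped as
#
#     @errors_out_migration
#     def resize_instance(self, context, instance, migration, ...):
#         ...
#
# will, on any unhandled exception, scan its positional arguments for a
# Migration object in the 'migrating' or 'post-migrating' state and flip its
# status to 'error' before re-raising the original exception.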
def reverts_task_state(function):
"""Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError as e:
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
with excutils.save_and_reraise_exception():
LOG.info(_("Task possibly preempted: %s") % e.format_message())
except Exception:
with excutils.save_and_reraise_exception():
try:
self._instance_update(context,
kwargs['instance']['uuid'],
task_state=None)
except Exception:
pass
return decorated_function
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception as e:
# NOTE(gtt): If argument 'instance' is in args rather than kwargs,
# we will get a KeyError exception which will cover up the real
# exception. So, we update kwargs with the values from args first.
# then, we can get 'instance' from kwargs easily.
kwargs.update(dict(zip(function.func_code.co_varnames[2:], args)))
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, kwargs['instance'],
e, sys.exc_info())
return decorated_function
def wrap_instance_event(function):
"""Wraps a method to log the event taken on the instance, and result.
This decorator wraps a method to log the start and result of an event, as
part of an action taken on an instance.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
wrapped_func = utils.get_wrapped_function(function)
keyed_args = safe_utils.getcallargs(wrapped_func, context, *args,
**kwargs)
instance_uuid = keyed_args['instance']['uuid']
event_name = 'compute_{0}'.format(function.func_name)
with compute_utils.EventReporter(context, self.conductor_api,
event_name, instance_uuid):
function(self, context, *args, **kwargs)
return decorated_function
def delete_image_on_error(function):
    """Used for snapshot-related methods to ensure the image created in
    compute.api is deleted when an error occurs.
"""
@functools.wraps(function)
def decorated_function(self, context, image_id, instance,
*args, **kwargs):
try:
return function(self, context, image_id, instance,
*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug(_("Cleaning up image %s") % image_id,
exc_info=True, instance=instance)
try:
image_service = glance.get_default_image_service()
image_service.delete(context, image_id)
except Exception:
LOG.exception(_("Error while trying to clean up image %s")
% image_id, instance=instance)
return decorated_function
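# Informational note (not upstream text): the decorator above is meant to wrap
# snapshot/backup style methods that receive an image_id as their first real
# argument; if the wrapped call raises, the partially created Glance image is
# deleted on a best-effort basis and the original exception is re-raised.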
# TODO(danms): Remove me after Icehouse
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
"""Wraps a method that expects a new-world instance
This provides compatibility for callers passing old-style dict
instances.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance_or_dict,
expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
metas = ['metadata', 'system_metadata']
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = (_load_instance(args[0]),) + args[1:]
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = migration_obj.Migration._from_db_object(
context.elevated(), migration_obj.Migration(),
migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
# TODO(danms): Remove me after Icehouse
def aggregate_object_compat(function):
"""Wraps a method that expects a new-world aggregate."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
aggregate = kwargs.get('aggregate')
if isinstance(aggregate, dict):
aggregate = aggregate_obj.Aggregate._from_db_object(
context.elevated(), aggregate_obj.Aggregate(),
aggregate)
kwargs['aggregate'] = aggregate
return function(self, context, *args, **kwargs)
return decorated_function
def _get_image_meta(context, image_ref):
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
class InstanceEvents(object):
def __init__(self):
self._events = {}
@staticmethod
def _lock_name(instance):
return '%s-%s' % (instance.uuid, 'events')
def prepare_for_instance_event(self, instance, event_name):
"""Prepare to receive an event for an instance.
This will register an event for the given instance that we will
wait on later. This should be called before initiating whatever
action will trigger the event. The resulting eventlet.event.Event
object should be wait()'d on to ensure completion.
:param instance: the instance for which the event will be generated
:param event_name: the name of the event we're expecting
:returns: an event object that should be wait()'d on
"""
@utils.synchronized(self._lock_name)
def _create_or_get_event():
if instance.uuid not in self._events:
self._events.setdefault(instance.uuid, {})
return self._events[instance.uuid].setdefault(
event_name, eventlet.event.Event())
LOG.debug(_('Preparing to wait for external event %(event)s '
'for instance %(uuid)s'), {'event': event_name,
'uuid': instance.uuid})
return _create_or_get_event()
def pop_instance_event(self, instance, event):
"""Remove a pending event from the wait list.
This will remove a pending event from the wait list so that it
can be used to signal the waiters to wake up.
:param instance: the instance for which the event was generated
:param event: the nova.objects.external_event.InstanceExternalEvent
that describes the event
:returns: the eventlet.event.Event object on which the waiters
are blocked
"""
@utils.synchronized(self._lock_name)
def _pop_event():
events = self._events.get(instance.uuid)
if not events:
return None
_event = events.pop(event.key, None)
if not events:
del self._events[instance.uuid]
return _event
return _pop_event()
def clear_events_for_instance(self, instance):
"""Remove all pending events for an instance.
This will remove all events currently pending for an instance
and return them (indexed by event name).
:param instance: the instance for which events should be purged
:returns: a dictionary of {event_name: eventlet.event.Event}
"""
@utils.synchronized(self._lock_name)
def _clear_events():
return self._events.pop(instance.uuid, {})
return _clear_events()
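# Informational sketch of the event flow implemented by InstanceEvents (the
# wiring itself lives elsewhere in the manager): a caller first registers
# interest via prepare_for_instance_event(instance, name), which stashes an
# eventlet Event keyed by instance uuid and event name; when the corresponding
# external event arrives, the handler is expected to call pop_instance_event()
# and send() on the returned Event, waking anything blocked on wait();
# clear_events_for_instance() drops every pending entry, e.g. when the
# instance goes away.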
class ComputeVirtAPI(virtapi.VirtAPI):
def __init__(self, compute):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
def instance_update(self, context, instance_uuid, updates):
return self._compute._instance_update(context,
instance_uuid,
**updates)
def provider_fw_rule_get_all(self, context):
return self._compute.conductor_api.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return self._compute.conductor_api.agent_build_get_by_triple(
context, hypervisor, os, architecture)
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
This context manager will first create plans to wait for the
provided event_names, yield, and then wait for all the scheduled
events to complete.
Note that this uses an eventlet.timeout.Timeout to bound the
operation, so callers should be prepared to catch that
failure and handle that situation appropriately.
If the event is not received by the specified timeout deadline,
eventlet.timeout.Timeout is raised.
If the event is received but did not have a 'completed'
status, a NovaException is raised. If an error_callback is
provided, instead of raising an exception as detailed above
for the failure case, the callback will be called with the
event_name and instance, and can return True to continue
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
:param:instance: The instance for which an event is expected
:param:event_names: A list of event names. Each element can be a
string event name or tuple of strings to
indicate (name, tag).
:param:deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
:param:error_callback: A function to be called if an event arrives
"""
if error_callback is None:
error_callback = self._default_error_callback
events = {}
for event_name in event_names:
if isinstance(event_name, tuple):
name, tag = event_name
event_name = external_event_obj.InstanceExternalEvent.make_key(
name, tag)
events[event_name] = (
self._compute.instance_events.prepare_for_instance_event(
instance, event_name))
yield
with eventlet.timeout.Timeout(deadline):
for event_name, event in events.items():
actual_event = event.wait()
if actual_event.status == 'completed':
continue
decision = error_callback(event_name, instance)
if decision is False:
break
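# Hypothetical caller sketch (not part of this module): the context manager
# above is typically used as
#
#     with self.virtapi.wait_for_instance_event(
#             instance, [('network-vif-plugged', vif_id)], deadline=300):
#         plug_the_vif()   # whatever action triggers the external event
#
# where 'network-vif-plugged' and vif_id are illustrative names; the body runs
# first, and only on exit does the manager block (bounded by an
# eventlet.timeout.Timeout) until every prepared event has been received.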
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='3.23')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = volume.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._bw_usage_supported = True
self._last_vol_usage_poll = 0
self._last_info_cache_heal = 0
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self._resource_tracker_dict = {}
self.instance_events = InstanceEvents()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def _get_resource_tracker(self, nodename):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if not self.driver.node_is_available(nodename):
raise exception.NovaException(
_("%s is not a valid node managed by this "
"compute host.") % nodename)
rt = resource_tracker.ResourceTracker(self.host,
self.driver,
nodename)
self._resource_tracker_dict[nodename] = rt
return rt
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
instance_ref = self.conductor_api.instance_update(context,
instance_uuid,
**kwargs)
if (instance_ref['host'] == self.host and
self.driver.node_is_available(instance_ref['node'])):
rt = self._get_resource_tracker(instance_ref.get('node'))
rt.update_usage(context, instance_ref)
return instance_ref
def _set_instance_error_state(self, context, instance_uuid):
try:
self._instance_update(context, instance_uuid,
vm_state=vm_states.ERROR)
except exception.InstanceNotFound:
LOG.debug(_('Instance has been destroyed from under us while '
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
def _set_instance_obj_error_state(self, context, instance):
try:
instance.vm_state = vm_states.ERROR
instance.save()
except exception.InstanceNotFound:
LOG.debug(_('Instance has been destroyed from under us while '
'trying to set it to ERROR'),
instance_uuid=instance.uuid)
def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records for the instances found
on the hypervisor which satisfy the specified filters. If filters=None
return a list of instance records for all the instances found on the
hypervisor.
"""
if not filters:
filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
filters['uuid'] = driver_uuids
local_instances = instance_obj.InstanceList.get_by_filters(
context, filters)
return local_instances
except NotImplementedError:
pass
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
instances = instance_obj.InstanceList.get_by_filters(context, filters)
name_map = dict((instance.name, instance) for instance in instances)
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)
if not instance:
continue
local_instances.append(instance)
return local_instances
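    # Informational note on the lookup above: drivers that implement
    # list_instance_uuids() let us filter directly in the DB by uuid; otherwise
    # we fall back to listing driver instance names and intersecting them with
    # the DB records matching the caller's filters via the name_map dictionary.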
def _destroy_evacuated_instances(self, context):
"""Destroys evacuated instances.
While nova-compute was down, the instances running on it could be
evacuated to another host. Check that the instances reported
by the driver are still associated with this host. If they are
not, destroy them.
"""
our_host = self.host
filters = {'deleted': False}
local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
if instance.host != our_host:
LOG.info(_('Deleting instance as its host ('
'%(instance_host)s) is not equal to our '
'host (%(our_host)s).'),
{'instance_host': instance.host,
'our_host': our_host}, instance=instance)
destroy_disks = False
try:
network_info = self._get_instance_nw_info(context,
instance)
bdi = self._get_instance_volume_block_device_info(context,
instance)
destroy_disks = not (self._is_instance_storage_shared(
context, instance))
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
bdi = {}
LOG.info(_('Instance has been marked deleted already, '
'removing it from the hypervisor.'),
instance=instance)
# always destroy disks if the instance was deleted
destroy_disks = True
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
def _is_instance_storage_shared(self, context, instance):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
obj_base.obj_to_primitive(instance),
data))
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage'),
instance=instance)
shared_storage = False
except Exception:
LOG.exception(_('Failed to check if instance shared'),
instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
instance.destroy()
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = quotas_obj.Quotas()
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
quotas.reserve(context, project_id=project_id, user_id=user_id,
instances=-1, cores=-instance.vcpus,
ram=-instance.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
instance.system_metadata)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
if CONF.vnc_enabled or CONF.spice.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(context,
instance.uuid)
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(context,
instance.uuid)
def _init_instance(self, context, instance):
'''Initialize this instance during service init.'''
        # Instances that are shut down or in an error state cannot be
        # initialized, and no attempt is made to recover them. The exceptions
        # are instances in RESIZE_MIGRATING or DELETING, which are dealt with
        # further down.
if (instance.vm_state == vm_states.SOFT_DELETED or
(instance.vm_state == vm_states.ERROR and
instance.task_state not in
(task_states.RESIZE_MIGRATING, task_states.DELETING))):
LOG.debug(_("Instance is in %s state."),
instance.vm_state, instance=instance)
return
if instance.vm_state == vm_states.DELETED:
try:
self._complete_partial_deletion(context, instance)
except Exception:
                # we don't want an exception to block the rest of init_host
msg = _('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
finally:
return
if (instance.vm_state == vm_states.BUILDING or
instance.task_state in [task_states.SCHEDULING,
task_states.BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING]):
            # NOTE(dave-mcnally) compute stopped before the instance was fully
            # spawned, so set it to ERROR state. This is safe because the
            # state may be set by the API but the host is not, so if we get
            # here the instance has already been scheduled to this host.
LOG.debug(_("Instance failed to spawn correctly, "
"setting to ERROR state"), instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
task_states.IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING,
task_states.IMAGE_SNAPSHOT]):
LOG.debug(_("Instance in transitional state %s at start-up "
"clearing task state"),
instance['task_state'], instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.DELETING:
try:
LOG.info(_('Service started deleting the instance during '
'the previous run, but did not finish. Restarting '
'the deletion now.'), instance=instance)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
self._delete_instance(context, instance, bdms)
except Exception:
                # we don't want an exception to block the rest of init_host
msg = _('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
self._set_instance_error_state(context, instance['uuid'])
finally:
return
net_info = compute_utils.get_nw_info_for_instance(instance)
try:
self.driver.plug_vifs(instance, net_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance)
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
power_on = (instance.system_metadata.get('old_vm_state') !=
vm_states.STOPPED)
block_dev_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception as e:
LOG.exception(_('Failed to revert crashed migration'),
instance=instance)
finally:
LOG.info(_('Instance found in migrating state during '
'startup. Resetting task_state'),
instance=instance)
instance.task_state = None
instance.save()
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug(_('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.'),
{'drv_state': drv_state, 'db_state': db_state},
instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
LOG.info(_('Rebooting instance after nova-compute restart.'),
instance=instance)
block_device_info = \
self._get_instance_volume_block_device_info(
context, instance)
try:
self.driver.resume_state_on_host_boot(
context, instance, net_info, block_device_info)
except NotImplementedError:
                LOG.warning(_('Hypervisor driver does not support '
                              'resuming guests'), instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we set the
# instance to error and attempt to continue.
LOG.warning(_('Failed to resume instance'), instance=instance)
self._set_instance_error_state(context, instance.uuid)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'firewall rules'), instance=instance)
def handle_lifecycle_event(self, event):
LOG.info(_("Lifecycle event %(state)d on VM %(uuid)s") %
{'state': event.get_transition(),
'uuid': event.get_instance_uuid()})
context = nova.context.get_admin_context()
instance = instance_obj.Instance.get_by_uuid(
context, event.get_instance_uuid())
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
else:
LOG.warning(_("Unexpected power state %d") %
event.get_transition())
if vm_power_state is not None:
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug(_("Event %s arrived for non-existent instance. The "
"instance was probably deleted.") % event)
else:
LOG.debug(_("Ignoring event %s") % event)
def init_virt_events(self):
self.driver.register_event_listener(self.handle_events)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = instance_obj.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
            # destroy any instances that were evacuated to another host while
            # this service was down
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context())
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug(_('Checking state'), instance=instance)
try:
return self.driver.get_info(instance)["state"]
except exception.NotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
#TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@wrap_exception()
def refresh_security_group_members(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronise the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance['uuid'])
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'security groups.'), instance=instance)
return _sync_refresh()
@wrap_exception()
def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
def _get_instance_nw_info(self, context, instance, use_slave=False):
"""Get a list of dictionaries of network data of an instance."""
if (not hasattr(instance, 'system_metadata') or
len(instance['system_metadata']) == 0):
# NOTE(danms): Several places in the code look up instances without
# pulling system_metadata for performance, and call this function.
# If we get an instance without it, re-fetch so that the call
# to network_api (which requires it for instance_type) will
# succeed.
instance = instance_obj.Instance.get_by_uuid(context,
instance['uuid'],
use_slave=use_slave)
network_info = self.network_api.get_instance_nw_info(context,
instance)
return network_info
def _await_block_device_map_created(self, context, vol_id, max_tries=60,
wait_between=1):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
# TODO(harlowja): make the max_tries configurable or dynamic?
attempts = 0
start = time.time()
while attempts < max_tries:
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status != 'available':
LOG.warn(_("Volume id: %s finished being created but was"
" not set as 'available'"), vol_id)
# NOTE(harlowja): return how many attempts were tried
return attempts + 1
greenthread.sleep(wait_between)
attempts += 1
# NOTE(harlowja): Should only happen if we ran out of attempts
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempts)
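    # Informational note: the helper above polls the volume status once per
    # wait_between seconds for at most max_tries iterations (60 x 1s by
    # default), returning the attempt count as soon as the volume leaves
    # 'creating'/'downloading'; if the budget is exhausted it raises
    # VolumeNotCreated with the elapsed time and attempt count.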
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
try:
decoded = base64.b64decode(contents)
return path, decoded
except TypeError:
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
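    # Illustrative example of the decoding above (values invented for this
    # note): an injected_files list of [('/etc/motd', 'aGVsbG8=')] would be
    # returned as [('/etc/motd', 'hello')], and any entry whose contents are
    # not valid base64 raises Base64Exception for that path.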
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec):
"""Launch a new instance with specified options."""
extra_usage_info = {}
def notify(status, msg="", fault=None, **kwargs):
"""Send a create.{start,error,end} notification."""
type_ = "create.%(status)s" % dict(status=status)
info = extra_usage_info.copy()
info['message'] = unicode(msg)
self._notify_about_instance_usage(context, instance, type_,
extra_usage_info=info, fault=fault, **kwargs)
try:
self._prebuild_instance(context, instance)
if request_spec and request_spec.get('image'):
image_meta = request_spec['image']
else:
image_meta = {}
extra_usage_info = {"image_name": image_meta.get('name', '')}
notify("start") # notify that build is starting
instance, network_info = self._build_instance(context,
request_spec, filter_properties, requested_networks,
injected_files, admin_password, is_first_time, node,
instance, image_meta, legacy_bdm_in_spec)
notify("end", msg=_("Success"), network_info=network_info)
except exception.RescheduledException as e:
# Instance build encountered an error, and has been rescheduled.
notify("error", fault=e)
except exception.BuildAbortException as e:
# Instance build aborted due to a non-failure
LOG.info(e)
notify("end", msg=unicode(e)) # notify that build is done
except Exception as e:
# Instance build encountered a non-recoverable error:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance['uuid'])
notify("error", fault=e) # notify that build failed
def _prebuild_instance(self, context, instance):
self._check_instance_exists(context, instance)
try:
self._start_building(context, instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
# Quickly bail out of here
raise exception.BuildAbortException(instance_uuid=instance['uuid'],
reason=msg)
def _validate_instance_group_policy(self, context, instance,
filter_properties):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# anti-affinity. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. This is a validation step to
# make sure that starting the instance here doesn't violate the policy.
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_uuid = scheduler_hints.get('group')
if not group_uuid:
return
@utils.synchronized(group_uuid)
def _do_validation(context, instance, group_uuid):
group = instance_group_obj.InstanceGroup.get_by_uuid(context,
group_uuid)
if 'anti-affinity' not in group.policies:
return
group_hosts = group.get_hosts(context, exclude=[instance['uuid']])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance['uuid'],
reason=msg)
_do_validation(context, instance, group_uuid)
def _build_instance(self, context, request_spec, filter_properties,
requested_networks, injected_files, admin_password, is_first_time,
node, instance, image_meta, legacy_bdm_in_spec):
context = context.elevated()
# If neutron security groups pass requested security
# groups to allocate_for_instance()
if request_spec and self.is_neutron_security_groups:
security_groups = request_spec.get('security_group')
else:
security_groups = []
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug(_("No node specified, defaulting to %s"), node)
network_info = None
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
# b64 decode the files to inject:
injected_files_orig = injected_files
injected_files = self._decode_files(injected_files)
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups,
dhcp_options)
self._instance_update(
context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image_meta,
bdms)
block_device_info = self._prep_block_device(
context, instance, bdms)
set_access_ip = (is_first_time and
not instance['access_ip_v4'] and
not instance['access_ip_v6'])
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password,
set_access_ip=set_access_ip)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the spawn
# Make sure the async call finishes
msg = _("Instance disappeared during build")
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._deallocate_network(context, instance)
except Exception:
msg = _('Failed to dealloc network '
'for deleted instance')
LOG.exception(msg, instance=instance)
raise exception.BuildAbortException(
instance_uuid=instance['uuid'],
reason=msg)
except exception.UnexpectedTaskStateError as e:
# Don't try to reschedule, just log and reraise.
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except Exception:
exc_info = sys.exc_info()
# try to re-schedule instance:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
rescheduled = self._reschedule_or_error(context, instance,
exc_info, requested_networks, admin_password,
injected_files_orig, is_first_time, request_spec,
filter_properties, bdms, legacy_bdm_in_spec)
if rescheduled:
# log the original build error
self._log_original_error(exc_info, instance['uuid'])
raise exception.RescheduledException(
instance_uuid=instance['uuid'],
reason=unicode(exc_info[1]))
else:
# not re-scheduling, go to error:
raise exc_info[0], exc_info[1], exc_info[2]
# spawn success
return instance, network_info
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_('Error: %s') % exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
def _reschedule_or_error(self, context, instance, exc_info,
requested_networks, admin_password, injected_files, is_first_time,
request_spec, filter_properties, bdms=None,
legacy_bdm_in_spec=True):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
instance_uuid = instance['uuid']
rescheduled = False
compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'instance.create.error', fault=exc_info[1])
try:
LOG.debug(_("Clean up resource before rescheduling."),
instance=instance)
if bdms is None:
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
self._shutdown_instance(context, instance,
bdms, requested_networks)
self._cleanup_volumes(context, instance['uuid'], bdms)
except Exception:
# do not attempt retry if clean up failed:
with excutils.save_and_reraise_exception():
self._log_original_error(exc_info, instance_uuid)
try:
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
legacy_bdm_in_spec)
task_state = task_states.SCHEDULING
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance['uuid'],
self.scheduler_rpcapi.run_instance, method_args,
task_state, exc_info)
except Exception:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
instance_uuid=instance_uuid)
return rescheduled
def _reschedule(self, context, request_spec, filter_properties,
instance_uuid, scheduler_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug(_("Retry info not present, will not reschedule"),
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug(_("No request spec, will not reschedule"),
instance_uuid=instance_uuid)
return
request_spec['instance_uuids'] = [instance_uuid]
LOG.debug(_("Re-scheduling %(method)s: attempt %(num)d") %
{'method': scheduler_method.func_name,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception(*exc_info)
scheduler_method(context, *method_args)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = instance_obj.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance['created_at'], timeout):
self._set_instance_error_state(context, instance['uuid'])
LOG.warn(_("Instance build timed out. Set to error state."),
instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance['name']):
raise exception.InstanceExists(name=instance['name'])
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
None))
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
LOG.debug(_("Allocating IP information in the background."),
instance=instance)
retries = CONF.network_allocate_retries
if retries < 0:
LOG.warn(_("Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."),
{'retries': retries})
attempts = retries > 1 and retries + 1 or 1
retry_time = 1
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options)
LOG.debug(_('Instance network_info: |%s|'), nwinfo,
instance=instance)
# NOTE(alaski): This can be done more cleanly once we're sure
# we'll receive an object.
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
sys_meta['network_allocated'] = 'True'
self._instance_update(context, instance['uuid'],
system_metadata=sys_meta)
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
raise exc_info[0], exc_info[1], exc_info[2]
LOG.warn(_('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
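    # Informational note on the retry behaviour above: with
    # CONF.network_allocate_retries = N and N > 1 there are N + 1 total
    # attempts; N = 0 or N = 1 both collapse to a single attempt because of
    # the 'retries > 1 and retries + 1 or 1' expression. Between failed
    # attempts the sleep doubles from 1 second up to a 30 second cap.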
def _build_networks_for_instance(self, context, instance,
requested_networks, security_groups):
# If we're here from a reschedule the network may already be allocated.
if strutils.bool_from_string(
instance.system_metadata.get('network_allocated', 'False')):
return self._get_instance_nw_info(context, instance)
if not self.is_neutron_security_groups:
security_groups = []
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups, dhcp_options)
if not instance.access_ip_v4 and not instance.access_ip_v6:
# If CONF.default_access_ip_network_name is set, grab the
# corresponding network and set the access ip values accordingly.
# Note that when there are multiple ips to choose from, an
# arbitrary one will be chosen.
network_name = CONF.default_access_ip_network_name
if not network_name:
return network_info
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
instance.save()
break
return network_info
def _allocate_network(self, context, instance, requested_networks, macs,
security_groups, dhcp_options):
"""Start network allocation asynchronously. Return an instance
of NetworkInfoAsyncWrapper that can be used to retrieve the
allocated networks when the operation has finished.
"""
# NOTE(comstud): Since we're allocating networks asynchronously,
# this task state has little meaning, as we won't be in this
# state for very long.
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING,
expected_task_state=[None])
is_vpn = pipelib.is_vpn_image(instance['image_ref'])
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn,
dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _default_block_device_names(self, context, instance,
image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
        It also ensures that there is a root_device_name and that it is set to
        the first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_instance = False
update_root_bdm = False
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance['root_device_name'] = root_device_name
update_instance = True
elif instance['root_device_name']:
root_device_name = instance['root_device_name']
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance['root_device_name'] = root_device_name
root_bdm.device_name = root_device_name
update_instance = update_root_bdm = True
if update_instance:
self._instance_update(context, instance['uuid'],
root_device_name=root_device_name)
if update_root_bdm:
root_bdm.save()
def _is_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot') and
driver_block_device.is_implemented(bdm))
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
swap = filter(block_device.new_format_is_swap,
block_devices)
block_device_mapping = filter(_is_mapping, block_devices)
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
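    # Informational summary: device names are resolved with the precedence
    # "root BDM's device_name, else the instance's stored root_device_name,
    # else a driver-provided default", after which the remaining ephemeral,
    # swap and mapped devices get defaults from
    # _default_device_names_for_instance.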
def _prep_block_device(self, context, instance, bdms):
"""Set up the block device for an instance with error logging."""
try:
block_device_info = {
'root_device_name': instance['root_device_name'],
'swap': driver_block_device.convert_swap(bdms),
'ephemerals': driver_block_device.convert_ephemerals(bdms),
'block_device_mapping': (
driver_block_device.attach_block_devices(
driver_block_device.convert_volumes(bdms),
context, instance, self.volume_api,
self.driver) +
driver_block_device.attach_block_devices(
driver_block_device.convert_snapshots(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created) +
driver_block_device.attach_block_devices(
driver_block_device.convert_images(bdms),
context, instance, self.volume_api,
self.driver, self._await_block_device_map_created))
}
if self.use_legacy_block_device_info:
for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
block_device_info[bdm_type] = \
driver_block_device.legacy_block_devices(
block_device_info[bdm_type])
# Get swap out of the list
block_device_info['swap'] = driver_block_device.get_swap(
block_device_info['swap'])
return block_device_info
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed block device setup'),
instance=instance)
@object_compat
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password,
set_access_ip=False):
"""Spawn an instance with error logging and update its power state."""
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed to spawn'), instance=instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
def _set_access_ip_values():
"""Add access ip values for a given instance.
If CONF.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose
from, an arbitrary one will be chosen.
"""
network_name = CONF.default_access_ip_network_name
if not network_name:
return
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if ip['version'] == 6:
instance.access_ip_v6 = ip['address']
return
if set_access_ip:
_set_access_ip_values()
if network_info is not None:
network_info.wait(do_raise=True)
instance.save(expected_task_state=task_states.SPAWNING)
return instance
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
LOG.debug(_('Deallocating network for instance'), instance=instance)
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
def _get_instance_volume_block_device_info(self, context, instance,
refresh_conn_info=False,
bdms=None):
"""Transform volumes to the driver block_device format."""
if not bdms:
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance['uuid']))
block_device_mapping = (
driver_block_device.convert_volumes(bdms) +
driver_block_device.convert_snapshots(bdms) +
driver_block_device.convert_images(bdms))
if not refresh_conn_info:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
block_device_mapping = [
bdm for bdm in block_device_mapping
if bdm.get('connection_info')]
else:
block_device_mapping = driver_block_device.refresh_conn_infos(
block_device_mapping, context, instance, self.volume_api,
self.driver)
if self.use_legacy_block_device_info:
block_device_mapping = driver_block_device.legacy_block_devices(
block_device_mapping)
return {'block_device_mapping': block_device_mapping}
# NOTE(mikal): No object_compat wrapper on this method because its
# callers all pass objects already
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
@utils.synchronized(instance.uuid)
def do_build_and_run_instance(context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None):
try:
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = _('Instance disappeared before build.')
LOG.debug(msg, instance=instance)
return
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
if limits is None:
limits = {}
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug(_('No node specified, defaulting to %s'), node)
try:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits)
except exception.RescheduledException as e:
LOG.debug(e.format_message(), instance=instance)
# dhcp_options are per host, so if they're set we need to
# deallocate the networks and reallocate on the next host.
if self.driver.dhcp_options_for_instance(instance):
self._cleanup_allocated_networks(context, instance,
requested_networks)
instance.task_state = task_states.SCHEDULING
instance.save()
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
injected_files, requested_networks, security_groups,
block_device_mapping)
except exception.InstanceNotFound:
msg = _('Instance disappeared during build.')
LOG.debug(msg, instance=instance)
except exception.BuildAbortException as e:
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._set_instance_error_state(context, instance.uuid)
except exception.UnexpectedDeletingTaskStateError as e:
# The instance is deleting, so clean up but don't error.
LOG.debug(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
except Exception:
# Should not reach here.
msg = _('Unexpected build failure, not rescheduling build.')
LOG.exception(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._set_instance_error_state(context, instance.uuid)
do_build_and_run_instance(context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node, limits)
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits):
image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
extra_usage_info={'image_name': image_name})
try:
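            # Claim resources on this node first, then build networks and
            # block devices inside the claim before asking the driver to
            # spawn the guest.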
rt = self._get_resource_tracker(node)
with rt.instance_claim(context, instance, limits):
with self._build_resources(context, instance,
requested_networks, security_groups, image,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
self.driver.spawn(context, instance, image,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info)
self._notify_about_instance_usage(context, instance,
'create.end',
extra_usage_info={'message': _('Success')},
network_info=network_info)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.end', fault=e)
except exception.ComputeResourcesUnavailable as e:
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=e.format_message())
except exception.BuildAbortException as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException,
exception.FixedIpLimitExceeded,
exception.NoMoreNetworks) as e:
LOG.exception(_('Failed to allocate network(s)'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except Exception as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=str(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.SPAWNING)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image, block_device_mapping):
resources = {}
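        # Networks are allocated first (asynchronously); block devices are
        # prepared afterwards. Both are handed back to the caller, which is
        # responsible for the actual spawn.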
try:
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
# Because this allocation is async any failures are likely to occur
# when the driver accesses network_info during spawn().
            LOG.exception(_('Failed to allocate network(s)'),
                          instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(context, instance, image,
block_device_mapping)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_('Failure prepping block device'),
instance=instance)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
LOG.exception(_('Instance failed to spawn'), instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
try:
self._cleanup_build_resources(context, instance,
block_device_mapping)
except Exception:
ctxt.reraise = False
msg = _('Could not clean up failed build,'
' not rescheduling')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
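        # Deallocate networks and note in system_metadata that none are
        # currently held.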
try:
self._deallocate_network(context, instance, requested_networks)
instance.system_metadata['network_allocated'] = 'False'
instance.save()
except Exception:
msg = _('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
def _cleanup_build_resources(self, context, instance,
block_device_mapping):
# Don't clean up networks here in case we reschedule
try:
self._cleanup_volumes(context, instance.uuid,
block_device_mapping)
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Failed to cleanup volumes for failed build,'
' not rescheduling')
LOG.exception(msg, instance=instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def run_instance(self, context, instance, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node, legacy_bdm_in_spec):
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance['uuid'])
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance,
legacy_bdm_in_spec)
do_run_instance()
def _try_deallocate_network(self, context, instance,
requested_networks=None):
try:
# tear down allocated network structure
self._deallocate_network(context, instance, requested_networks)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to deallocate network for instance.'),
instance=instance)
self._set_instance_error_state(context, instance['uuid'])
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True):
"""Shutdown an instance on this host."""
context = context.elevated()
LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
context=context, instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
# get network info before tearing down
try:
network_info = self._get_instance_nw_info(context, instance)
except (exception.NetworkNotFound, exception.NoMoreFixedIps,
exception.InstanceInfoCacheNotFound):
network_info = network_model.NetworkInfo()
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
try:
self.driver.destroy(context, instance, network_info,
block_device_info)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
self._try_deallocate_network(context, instance,
requested_networks)
self._try_deallocate_network(context, instance, requested_networks)
for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id)
except exception.DiskNotFound as exc:
LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.warn(_('Ignoring VolumeNotFound: %s') % exc,
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
def _cleanup_volumes(self, context, instance_uuid, bdms):
for bdm in bdms:
LOG.debug(_("terminating bdm %s") % bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
# NOTE(vish): bdms will be deleted on instance destroy
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms,
reservations=None):
"""Delete an instance on this host. Commit or rollback quotas
as necessary.
"""
instance_uuid = instance['uuid']
image = instance['image_ref']
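        # Quota operations below are charged against the instance's owner,
        # which may differ from the (possibly admin) caller.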
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED
if was_soft_deleted:
# Instances in SOFT_DELETED vm_state have already had quotas
# decremented.
try:
self._quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
except Exception:
pass
reservations = None
try:
events = self.instance_events.clear_events_for_instance(instance)
if events:
                LOG.debug(_('Events pending at deletion: %(events)s'),
                          {'events': ','.join(events.keys())},
                          instance=instance)
db_inst = obj_base.obj_to_primitive(instance)
instance.info_cache.delete()
self._notify_about_instance_usage(context, instance,
"delete.start")
self._shutdown_instance(context, db_inst, bdms)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
# the volume deletion failed, but it is not
# acceptable to have an instance that can not be
# deleted. Perhaps this could be reworked in the
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
try:
self._cleanup_volumes(context, instance_uuid, bdms)
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
            # if a delete task succeeds, always update vm state and task
            # state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
system_meta = utils.instance_sys_meta(instance)
db_inst = self.conductor_api.instance_destroy(
context, obj_base.obj_to_primitive(instance))
instance = instance_obj.Instance._from_db_object(context, instance,
db_inst)
except Exception:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
quotas = quotas_obj.Quotas.from_reservations(context,
reservations,
instance=instance)
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations):
"""Terminate an instance on this host."""
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this when we bump the RPC major version to 4.0
if (bdms and
any(not isinstance(bdm, block_device_obj.BlockDeviceMapping)
for bdm in bdms)):
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
@utils.synchronized(instance['uuid'])
def do_terminate_instance(instance, bdms):
try:
self._delete_instance(context, instance, bdms,
reservations=reservations)
except exception.InstanceNotFound:
LOG.info(_("Instance disappeared during terminate"),
instance=instance)
except Exception as error:
# As we're trying to delete always go to Error if something
# goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
LOG.exception(_('Setting instance vm_state to ERROR'),
instance=instance)
self._set_instance_error_state(context, instance['uuid'])
do_terminate_instance(instance, bdms)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def stop_instance(self, context, instance):
"""Stopping an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=task_states.POWERING_OFF)
self._notify_about_instance_usage(context, instance, "power_off.end")
def _power_on(self, context, instance):
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
# NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
self._power_on(context, instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.POWERING_ON)
self._notify_about_instance_usage(context, instance, "power_on.end")
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations):
"""Soft delete an instance on this host."""
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
try:
self._notify_about_instance_usage(context, instance,
"soft_delete.start")
try:
self.driver.soft_delete(instance)
except NotImplementedError:
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
except Exception:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
self._quota_commit(context, reservations, project_id=project_id,
user_id=user_id)
self._notify_about_instance_usage(context, instance, "soft_delete.end")
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
try:
self.driver.restore(instance)
except NotImplementedError:
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self._power_on(context, instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, bdms,
detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
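        # Detach volumes, destroy the existing guest (unless this is a
        # recreate on a new host), then re-attach block devices and spawn
        # from the supplied image.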
detach_block_devices(context, bdms)
if not recreate:
self.driver.destroy(context, instance, network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=new_block_device_info)
@object_compat
@messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance: Instance object
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if the instance is being recreated (e.g. the
hypervisor it was on failed) - cleanup of old state will be
skipped.
:param on_shared_storage: True if instance files on shared storage
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
"""
context = context.elevated()
# NOTE (ndipanov): If we get non-object BDMs, just get them from the
# db again, as this means they are sent in the old format and we want
# to avoid converting them back when we can just get them.
# Remove this on the next major RPC version bump
if (bdms and
any(not isinstance(bdm, block_device_obj.BlockDeviceMapping)
for bdm in bdms)):
bdms = None
orig_vm_state = instance.vm_state
with self._error_out_instance_on_exception(context, instance.uuid):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
if recreate:
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
                # Cover the case where the admin expects the instance files
                # to be on shared storage but they are not accessible, and
                # vice versa.
if on_shared_storage != self.driver.instance_on_disk(instance):
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info(_('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info(_("disk not on shared storagerebuilding from:"
" '%s'") % str(image_ref))
# NOTE(mriedem): On a recreate (evacuate), we need to update
                # the instance's host and node properties to reflect its
# destination node for the recreate.
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node['hypervisor_hostname']
except exception.NotFound:
LOG.exception(_('Failed to get compute_info for %s') %
self.host)
finally:
instance.host = self.host
instance.node = node_name
instance.save()
if image_ref:
image_meta = _get_image_meta(context, image_ref)
else:
image_meta = {}
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
self.conductor_api.notify_usage_exists(context,
obj_base.obj_to_primitive(instance),
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
self.network_api.setup_networks_on_host(
context, instance, self.host)
network_info = self._get_instance_nw_info(context, instance)
if bdms is None:
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
block_device_info = \
self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
self.volume_api.detach(context, bdm.volume_id)
files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral)
try:
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
if orig_vm_state == vm_states.STOPPED:
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_("Detaching from volume api: %s") % volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def reboot_instance(self, context, instance, block_device_info,
reboot_type):
"""Reboot an instance on this host."""
context = context.elevated()
LOG.audit(_("Rebooting instance"), context=context, instance=instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.save()
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
context=context, instance=instance)
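        # If the virt layer has to detach non-working volumes to complete
        # the reboot, this callback mirrors those detaches on the Cinder
        # side.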
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
context, instance, bad_devices, block_device_info)
try:
# Don't change it out of rescue mode
if instance['vm_state'] == vm_states.RESCUED:
new_vm_state = vm_states.RESCUED
else:
new_vm_state = vm_states.ACTIVE
new_power_state = None
self.driver.reboot(context, instance,
network_info,
reboot_type,
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as error:
with excutils.save_and_reraise_exception() as ctxt:
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning(_('Reboot failed but instance is running'),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
'reboot.error', fault=error)
ctxt.reraise = False
else:
LOG.error(_('Cannot reboot instance: %s'), error,
context=context, instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.warn(_("Instance disappeared during reboot"),
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def backup_instance(self, context, image_id, instance, backup_type,
rotation):
"""Backup an instance on this host.
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around
"""
if rotation < 0:
raise exception.RotationRequiredForBackup()
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)
self._rotate_backups(context, instance, backup_type, rotation)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
@delete_image_on_error
def snapshot_instance(self, context, image_id, instance):
"""Snapshot an instance on this host.
:param context: security context
:param instance: an Instance dict
:param image_id: glance.db.sqlalchemy.models.Image.Id
"""
# NOTE(dave-mcnally) the task state will already be set by the api
# but if the compute manager has crashed/been restarted prior to the
# request getting here the task state may have been cleared so we set
# it again and things continue normally
try:
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(
expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
except exception.InstanceNotFound:
# possibility instance no longer exists, no point in continuing
LOG.debug(_("Instance not found, could not set state %s "
"for instance."),
task_states.IMAGE_SNAPSHOT, instance=instance)
return
except exception.UnexpectedDeletingTaskStateError:
LOG.debug(_("Instance being deleted, snapshot cannot continue"),
instance=instance)
return
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_SNAPSHOT)
def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
try:
instance.power_state = current_power_state
instance.save()
LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = _("Instance disappeared during snapshot")
LOG.debug(msg, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
msg = _("Image not found during snapshot")
LOG.warn(msg, instance=instance)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
self.driver.volume_snapshot_create(context, instance, volume_id,
create_info)
@object_compat
@messaging.expected_exceptions(NotImplementedError)
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
self.driver.volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
image_service = glance.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = image_service.detail(context, filters=filters,
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"),
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups"), excess,
instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %s"), image_id,
instance=instance)
image_service.delete(context, image_id)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
instance.task_state = None
instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
_msg = _('Failed to set admin password. Instance %s is not'
' running') % instance["uuid"]
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
else:
try:
self.driver.set_admin_password(instance, new_pass)
LOG.audit(_("Root password set"), instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except NotImplementedError:
_msg = _('set_admin_password is not implemented '
'by this driver or guest instance.')
LOG.warn(_msg, instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
raise NotImplementedError(_msg)
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception as e:
# Catch all here because this could be anything.
LOG.exception(_('set_admin_password failed: %s') % e,
instance=instance)
self._set_instance_error_state(context,
instance['uuid'])
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
            LOG.warn(_('trying to inject a file into a non-running instance '
                       '(state: %(current_state)s expected: '
                       '%(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.audit(_('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance):
"""Determine what image should be used to boot the rescue VM."""
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
# 1. First try to use base image associated with instance's current
# image.
#
# The idea here is to provide the customer with a rescue environment
# which they are familiar with. So, if they built their instance off of
# a Debian image, their rescue VM will also be Debian.
if not rescue_image_ref:
# 2. As a last resort, use instance's current image
LOG.warn(_('Unable to find a different image to use for rescue VM,'
' using instance\'s current image'))
rescue_image_ref = instance['image_ref']
image_service, image_id = glance.get_remote_image_service(
context, rescue_image_ref)
image_meta = compute_utils.get_image_metadata(context, image_service,
rescue_image_ref,
instance)
# NOTE(belliott) bug #1227350 - xenapi needs the actual image id
image_meta['id'] = rescue_image_ref
return image_meta
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
def rescue_instance(self, context, instance, rescue_password):
"""Rescue an instance on this host.
:param rescue_password: password to set on rescue instance
"""
context = context.elevated()
LOG.audit(_('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
network_info = self._get_instance_nw_info(context, instance)
rescue_image_meta = self._get_rescue_image(context, instance)
extra_usage_info = {'rescue_image_name':
rescue_image_meta.get('name', '')}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
try:
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
LOG.exception(_("Error trying to Rescue Instance"),
instance=instance)
raise exception.InstanceNotRescuable(
instance_id=instance['uuid'],
reason=_("Driver Error: %s") % unicode(e))
self.conductor_api.notify_usage_exists(context, instance,
current_period=True)
current_power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = current_power_state
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
self._notify_about_instance_usage(context, instance,
"rescue.end", extra_usage_info=extra_usage_info,
network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unrescue_instance(self, context, instance):
"""Rescue an instance on this host."""
context = context.elevated()
LOG.audit(_('Unrescuing'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
"unrescue.start", network_info=network_info)
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.unrescue(instance,
network_info)
current_power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = current_power_state
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
instance,
"unrescue.end",
network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug(_("Changing instance metadata according to %r"),
diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
def _cleanup_stored_instance_types(self, migration, instance,
restore_old=False):
"""Clean up "old" and "new" instance_type information stored in
instance's system_metadata. Optionally update the "current"
instance_type to the saved old one first.
Returns the updated system_metadata as a dict, as well as the
post-cleanup current instance type.
"""
sys_meta = instance.system_metadata
if restore_old:
instance_type = flavors.extract_flavor(instance, 'old_')
sys_meta = flavors.save_flavor_info(sys_meta, instance_type)
else:
instance_type = flavors.extract_flavor(instance)
flavors.delete_flavor_info(sys_meta, 'old_')
flavors.delete_flavor_info(sys_meta, 'new_')
return sys_meta, instance_type
@wrap_exception()
@wrap_instance_event
@wrap_instance_fault
def confirm_resize(self, context, instance, reservations, migration):
@utils.synchronized(instance['uuid'])
def do_confirm_resize(context, instance, migration_id):
# NOTE(wangpan): Get the migration status from db, if it has been
# confirmed, we do nothing and return here
LOG.debug(_("Going to confirm migration %s") % migration_id,
context=context, instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
migration = migration_obj.Migration.get_by_id(
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error(_("Migration %s is not found during confirmation") %
migration_id, context=context, instance=instance)
return
if migration.status == 'confirmed':
LOG.info(_("Migration %s is already confirmed") %
migration_id, context=context, instance=instance)
return
elif migration.status not in ('finished', 'confirming'):
LOG.warn(_("Unexpected confirmation status '%(status)s' of "
"migration %(id)s, exit confirmation process") %
{"status": migration.status, "id": migration_id},
context=context, instance=instance)
return
# NOTE(wangpan): Get the instance from db, if it has been
# deleted, we do nothing and return here
expected_attrs = ['metadata', 'system_metadata']
try:
instance = instance_obj.Instance.get_by_uuid(context,
instance.uuid, expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info(_("Instance is not found during confirmation"),
context=context, instance=instance)
return
self._confirm_resize(context, instance, reservations=reservations,
migration=migration)
do_confirm_resize(context, instance, migration.id)
def _confirm_resize(self, context, instance, reservations=None,
migration=None):
"""Destroys the source instance."""
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(danms): delete stashed migration information
sys_meta, instance_type = self._cleanup_stored_instance_types(
migration, instance)
sys_meta.pop('old_vm_state', None)
instance.system_metadata = sys_meta
instance.save()
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.confirm_migration(migration, instance,
network_info)
migration.status = 'confirmed'
migration.save(context.elevated())
rt = self._get_resource_tracker(migration.source_node)
rt.drop_resize_claim(instance, prefix='old_')
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
# resize/migrate, so we need to check the current power state
# on the instance and set the vm_state appropriately. We default
# to ACTIVE because if the power state is not SHUTDOWN, we
# assume _sync_instance_power_state will clean it up.
p_state = instance.power_state
vm_state = None
if p_state == power_state.SHUTDOWN:
vm_state = vm_states.STOPPED
LOG.debug(_("Resized/migrated instance is powered off. "
"Setting vm_state to '%s'."), vm_state,
instance=instance)
else:
vm_state = vm_states.ACTIVE
instance.vm_state = vm_state
instance.task_state = None
instance.save(expected_task_state=[None, task_states.DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
self._quota_commit(context, reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def revert_resize(self, context, instance, migration, reservations):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
instance_p = obj_base.obj_to_primitive(instance)
migration_p = obj_base.obj_to_primitive(migration)
self.conductor_api.network_migrate_instance_start(context,
instance_p,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
self.driver.destroy(context, instance, network_info,
block_device_info)
self._terminate_volume_connections(context, instance, bdms)
migration.status = 'reverted'
migration.save(context.elevated())
rt = self._get_resource_tracker(instance.node)
rt.drop_resize_claim(instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute,
reservations=reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def finish_revert_resize(self, context, instance, reservations, migration):
"""Finishes the second half of reverting a resize.
Bring the original source instance state back (active/shutoff) and
revert the resized attributes in the database.
"""
with self._error_out_instance_on_exception(context, instance.uuid,
reservations):
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
sys_meta, instance_type = self._cleanup_stored_instance_types(
migration, instance, True)
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
# is not set
old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)
instance.system_metadata = sys_meta
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.instance_type_id = instance_type['id']
instance.host = migration['source_compute']
instance.node = migration['source_node']
instance.save()
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_REVERTING)
instance_p = obj_base.obj_to_primitive(instance)
migration_p = obj_base.obj_to_primitive(migration)
self.conductor_api.network_migrate_instance_finish(context,
instance_p,
migration_p)
# if the original vm state was STOPPED, set it back to STOPPED
LOG.info(_("Updating instance to original state: '%s'") %
old_vm_state)
if power_on:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save()
else:
instance.task_state = task_states.POWERING_OFF
instance.save()
self.stop_instance(context, instance=instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
self._quota_commit(context, reservations)
def _quota_commit(self, context, reservations, project_id=None,
user_id=None):
if reservations:
self.conductor_api.quota_commit(context, reservations,
project_id=project_id,
user_id=user_id)
def _quota_rollback(self, context, reservations, project_id=None,
user_id=None):
if reservations:
self.conductor_api.quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
def _prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
if not filter_properties:
filter_properties = {}
if not instance['host']:
self._set_instance_error_state(context, instance['uuid'])
msg = _('Instance has no source host')
raise exception.MigrationError(msg)
same_host = instance['host'] == self.host
if same_host and not CONF.allow_resize_to_same_host:
self._set_instance_error_state(context, instance['uuid'])
msg = _('destination same as source!')
raise exception.MigrationError(msg)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
sys_meta = instance.system_metadata
flavors.save_flavor_info(sys_meta, instance_type, prefix='new_')
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance['vm_state']
LOG.debug(_('Stashing vm_state: %s'), vm_state, instance=instance)
sys_meta['old_vm_state'] = vm_state
instance.save()
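        # Claim resources for the new flavor on the destination node within
        # the scheduler-supplied limits; the claim provides the migration
        # record handed to resize_instance.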
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type,
limits=limits) as claim:
LOG.audit(_('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(context, instance,
claim.migration, image, instance_type, reservations)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug(_("No node specified, defaulting to %s"), node)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
try:
self._prep_resize(context, image, instance,
instance_type, reservations,
request_spec, filter_properties,
node)
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, reservations, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, reservations, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
if not request_spec:
request_spec = {}
if not filter_properties:
filter_properties = {}
rescheduled = False
instance_uuid = instance['uuid']
try:
# NOTE(comstud): remove the scheduler RPCAPI method when
# this is adjusted to send to conductor... and then
# deprecate the scheduler manager method.
scheduler_method = self.scheduler_rpcapi.prep_resize
instance_p = obj_base.obj_to_primitive(instance)
method_args = (instance_p, instance_type, image, request_spec,
filter_properties, reservations)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance_uuid, scheduler_method,
method_args, task_state, exc_info)
except Exception as error:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, instance, error,
exc_info=sys.exc_info())
self._notify_about_instance_usage(context, instance,
'resize.error', fault=error)
if rescheduled:
self._log_original_error(exc_info, instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
self.conductor_api, instance, exc_info[1],
exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'resize.error', fault=exc_info[1])
else:
# not re-scheduling
raise exc_info[0], exc_info[1], exc_info[2]
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
reservations, migration, instance_type):
"""Starts the migration of a running instance to another host."""
with self._error_out_instance_on_exception(context, instance.uuid,
reservations):
if not instance_type:
instance_type = flavor_obj.Flavor.get_by_id(
context, migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
migration.status = 'migrating'
migration.save(context.elevated())
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
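            # Power off the guest and migrate its disks to the destination
            # host; the driver returns disk_info that finish_resize uses to
            # reassemble the instance there.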
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info)
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
instance_p = obj_base.obj_to_primitive(instance)
self.conductor_api.network_migrate_instance_start(context,
instance_p,
migration_p)
migration.status = 'post-migrating'
migration.save(context.elevated())
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
migration.dest_compute, reservations=reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
def _finish_resize(self, context, instance, migration, disk_info,
image):
resize_instance = False
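        # resize_instance is only flipped to True when the flavor actually
        # changes; a same-flavor migration skips the flavor bookkeeping below.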
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = flavors.extract_flavor(instance)
sys_meta = instance.system_metadata
        # NOTE(mriedem): Get the old_vm_state so we know if we should
        # power on the instance. If old_vm_state is not set we need to
        # default to ACTIVE for backwards compatibility.
old_vm_state = sys_meta.get('old_vm_state', vm_states.ACTIVE)
flavors.save_flavor_info(sys_meta,
old_instance_type,
prefix='old_')
if old_instance_type_id != new_instance_type_id:
instance_type = flavors.extract_flavor(instance, prefix='new_')
flavors.save_flavor_info(sys_meta, instance_type)
instance.instance_type_id = instance_type['id']
instance.memory_mb = instance_type['memory_mb']
instance.vcpus = instance_type['vcpus']
instance.root_gb = instance_type['root_gb']
instance.ephemeral_gb = instance_type['ephemeral_gb']
instance.system_metadata = sys_meta
instance.save()
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
instance_p = obj_base.obj_to_primitive(instance)
migration_p = obj_base.obj_to_primitive(migration)
self.conductor_api.network_migrate_instance_finish(context,
instance_p,
migration_p)
network_info = self._get_instance_nw_info(context, instance)
instance.task_state = task_states.RESIZE_FINISH
instance.system_metadata = sys_meta
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
block_device_info = self._get_instance_volume_block_device_info(
context, instance, refresh_conn_info=True)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image, resize_instance,
block_device_info, power_on)
migration.status = 'finished'
migration.save(context.elevated())
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
reservations, migration):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
try:
self._finish_resize(context, instance, migration,
disk_info, image)
self._quota_commit(context, reservations)
except Exception as error:
LOG.exception(_('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
self._quota_rollback(context, reservations)
except Exception as qr_error:
LOG.exception(_("Failed to rollback quota for failed "
"finish_resize: %s"),
qr_error, instance=instance)
self._set_instance_error_state(context, instance['uuid'])
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
self.network_api.add_fixed_ip_to_instance(context, instance,
network_id)
network_info = self._inject_network_info(context, instance)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
self.network_api.remove_fixed_ip_from_instance(context, instance,
address)
network_info = self._inject_network_info(context, instance)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.audit(_('Pausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
self.driver.pause(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
self._notify_about_instance_usage(context, instance, 'pause.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.audit(_('Unpausing'), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
self.driver.unpause(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
self._notify_about_instance_usage(context, instance, 'unpause.end')
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
# TODO(russellb) Remove the unused host parameter from the driver API
return self.driver.host_power_action(None, action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(russellb) Remove the unused host parameter from the driver API
return self.driver.set_host_enabled(None, enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime(self.host)
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.audit(_("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
with self._error_out_instance_on_exception(context, instance['uuid'],
instance_state=instance['vm_state']):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.audit(_('Resuming'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESUMING)
self._notify_about_instance_usage(context, instance, 'resume')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
It also adds system_metadata that can be used by a periodic task to
offload the shelved instance after a period of time.
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
"""
self.conductor_api.notify_usage_exists(
context, obj_base.obj_to_primitive(instance),
current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
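        # Callback handed to driver.snapshot(); it maps the generic snapshot
        # task states onto their shelving-specific equivalents so the
        # instance reports SHELVING_* while the image is uploaded.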
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
task_states.IMAGE_PENDING_UPLOAD:
task_states.SHELVING_IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING:
task_states.SHELVING_IMAGE_UPLOADING,
task_states.SHELVING: task_states.SHELVING}
task_state = shelving_state_map[task_state]
expected_state = shelving_state_map[expected_state]
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.strtime()
instance.system_metadata['shelved_image_id'] = image_id
instance.system_metadata['shelved_host'] = self.host
instance.vm_state = vm_states.SHELVED
instance.task_state = None
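        # A shelved_offload_time of 0 means offload immediately: mark the
        # offload task state before saving so shelve_offload_instance
        # (called below) sees the expected state.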
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = current_power_state
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
self._notify_about_instance_usage(context, instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.shelve_offload_instance(context, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def shelve_offload_instance(self, context, instance):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
to slower unshelve times for this instance. This method is used by
volume backed instances since restoring them doesn't involve the
potentially large download of an image.
:param context: request context
:param instance: an Instance dict
"""
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.destroy(context, instance, network_info,
block_device_info)
instance.power_state = current_power_state
instance.host = None
instance.node = None
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties=None, node=None):
"""Unshelve the instance.
:param context: request context
:param instance: an Instance dict
:param image: an image to build from. If None we assume a
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance['uuid'])
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
self._notify_about_instance_usage(context, instance, 'unshelve.start')
instance.task_state = task_states.SPAWNING
instance.save()
network_info = self._get_instance_nw_info(context, instance)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug(_('No node specified, defaulting to %s'), node)
rt = self._get_resource_tracker(node)
limits = filter_properties.get('limits', {})
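        # Claim resources on this node and respawn the guest from the shelved
        # snapshot; if spawning fails the snapshot image is deleted so it
        # does not leak in glance.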
try:
with rt.instance_claim(context, instance, limits):
self.driver.spawn(context, instance, image, injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed to spawn'), instance=instance)
if image:
image_service = glance.get_default_image_service()
image_service.delete(context, image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.SPAWNING)
self._notify_about_instance_usage(context, instance, 'unshelve.end')
@reverts_task_state
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug(_('Reset network'), context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance):
"""Inject network info for the given instance."""
LOG.debug(_('Inject network info'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_('network_info to inject: |%s|'), network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
return network_info
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
self._inject_network_info(context, instance)
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
instance = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance)
context = context.elevated()
LOG.audit(_("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(context, instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
return '\n'.join(log.split('\n')[-int(length):])
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug(_("Getting vnc console"), instance=instance)
token = str(uuid.uuid4())
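        # Generate a one-time token and build the proxy access URL for the
        # requested console type; the driver supplies the actual host/port
        # connection details below.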
if not CONF.vnc_enabled:
raise exception.ConsoleTypeInvalid(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_vnc_console(context, instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
except exception.InstanceNotFound:
if instance['vm_state'] != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug(_("Getting spice console"), instance=instance)
token = str(uuid.uuid4())
if not CONF.spice.enabled:
raise exception.ConsoleTypeInvalid(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_spice_console(context, instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
except exception.InstanceNotFound:
if instance['vm_state'] != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
@object_compat
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug(_("Getting RDP console"), instance=instance)
token = str(uuid.uuid4())
if not CONF.rdp.enabled:
raise exception.ConsoleTypeInvalid(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_rdp_console(context, instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
except exception.InstanceNotFound:
if instance['vm_state'] != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@object_compat
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info['port'] == port
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus=None, device_type=None):
# NOTE(ndipanov): disk_bus and device_type will be set to None if not
# passed (by older clients) and defaulted by the virt driver. Remove
# default values on the next major RPC version bump.
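        # Serialize on the instance UUID so concurrent attach requests cannot
        # pick the same device name.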
@utils.synchronized(instance['uuid'])
def do_reserve():
bdms = (
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
device_name = compute_utils.get_device_name_for_instance(
context, instance, bdms, device)
# NOTE(vish): create bdm here to avoid race condition
bdm = block_device_obj.BlockDeviceMapping(
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid,
volume_id=volume_id or 'reserved',
device_name=device_name,
disk_bus=disk_bus, device_type=device_type)
bdm.create(context)
return device_name
return do_reserve()
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint,
instance, bdm=None):
"""Attach a volume to an instance."""
if not bdm:
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy(context)
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_check_attach=False, do_driver_attach=True)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to attach %(volume_id)s "
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
self.volume_api.unreserve_volume(context, bdm.volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)
def _detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
mp = bdm.device_name
volume_id = bdm.volume_id
LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
connection_info = jsonutils.loads(bdm.connection_info)
# NOTE(vish): We currently don't use the serial when disconnecting,
# but added for completeness in case we ever do.
if connection_info and 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
if not self.driver.instance_exists(instance['name']):
LOG.warn(_('Detaching volume from unknown instance'),
context=context, instance=instance)
encryption = encryptors.get_encryption_metadata(
context, self.volume_api, volume_id, connection_info)
self.driver.detach_volume(connection_info,
instance,
mp,
encryption=encryption)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_('Failed to detach volume %(volume_id)s '
'from %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
"""Detach a volume from an instance."""
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
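        # Capture the final block I/O statistics before the volume is
        # detached so the usage cache totals are complete.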
if CONF.volume_usage_poll_interval > 0:
vol_stats = []
mp = bdm.device_name
# Handle bootable volumes which will not contain /dev/
if '/dev/' in mp:
mp = mp[5:]
try:
vol_stats = self.driver.block_stats(instance['name'], mp)
except NotImplementedError:
pass
if vol_stats:
LOG.debug(_("Updating volume usage cache with totals"))
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
self.conductor_api.vol_usage_update(context, volume_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance,
update_totals=True)
self._detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
self.volume_api.detach(context.elevated(), volume_id)
bdm.destroy()
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
new_cinfo['serial'] = old_cinfo['serial']
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
new_volume_id):
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
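        # Initialize a connection to the new volume, have the driver copy the
        # data across, then tear down whichever connection is no longer
        # needed and let Cinder finalize the volume migration.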
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
old_volume_id,
connector,
instance,
bdm)
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint)
except Exception: # pylint: disable=W0702
failed = True
with excutils.save_and_reraise_exception():
if new_cinfo:
msg = _("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg % {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
context=context,
instance=instance)
else:
msg = _("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg % {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
finally:
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
self.volume_api.terminate_connection(context,
conn_volume,
connector)
# If Cinder initiated the swap, it will keep
# the original ID
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
self.volume_api.attach(context,
new_volume_id,
instance['uuid'],
mountpoint)
# Remove old connection
self.volume_api.detach(context.elevated(), old_volume_id)
return (comp_ret, new_cinfo)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance):
"""Swap volume for an instance."""
context = context.elevated()
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, old_volume_id, instance_uuid=instance.uuid)
connector = self.driver.get_volume_connector(instance)
comp_ret, new_cinfo = self._swap_volume(context, instance,
bdm,
connector,
old_volume_id,
new_volume_id)
save_volume_id = comp_ret['save_volume_id']
# Update bdm
values = {
'connection_info': jsonutils.dumps(new_cinfo),
'delete_on_termination': False,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': save_volume_id,
'volume_size': None,
'no_device': None}
bdm.update(values)
bdm.save()
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
try:
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
self._detach_volume(context, instance, bdm)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass
@object_compat
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip)
if len(network_info) != 1:
LOG.error(_('allocate_port_for_instance returned %(ports)s ports')
% dict(ports=len(network_info)))
raise exception.InterfaceAttachFailed(instance=instance)
image_ref = instance.get('image_ref')
image_service, image_id = glance.get_remote_image_service(
context, image_ref)
image_meta = compute_utils.get_image_metadata(
context, image_service, image_ref, instance)
self.driver.attach_interface(instance, image_meta, network_info[0])
return network_info[0]
@object_compat
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
# FIXME(comstud): Why does this need elevated context?
network_info = self._get_instance_nw_info(context.elevated(),
instance)
condemned = None
for vif in network_info:
if vif['id'] == port_id:
condemned = vif
break
if condemned is None:
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
self.network_api.deallocate_port_for_instance(context, instance,
port_id)
self.driver.detach_interface(instance, condemned)
def _get_compute_info(self, context, host):
compute_node_ref = self.conductor_api.service_get_by_compute_host(
context, host)
try:
return compute_node_ref['compute_node'][0]
except IndexError:
raise exception.NotFound(_("Host %s not found") % host)
@wrap_exception()
@wrap_instance_fault
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param context: security context
:param data: result of driver.check_instance_shared_storage_local
Returns True if instance disks located on shared storage and
False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info
"""
src_compute_info = self._get_compute_info(ctxt, instance.host)
dst_compute_info = self._get_compute_info(ctxt, CONF.host)
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
migrate_data = {}
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
if 'migrate_data' in dest_check_data:
migrate_data.update(dest_check_data['migrate_data'])
return migrate_data
@wrap_exception()
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
instance)
dest_check_data['is_volume_backed'] = is_volume_backed
return self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
@object_compat
@wrap_exception()
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param migrate_data : if not None, it is a dict which holds data
required for live migration without shared storage.
"""
block_device_info = self._get_instance_volume_block_device_info(
context, instance, refresh_conn_info=True)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
pre_live_migration_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
# Creating filters to hypervisors and firewalls.
# An example is that nova-instance-instance-xxx,
# which is written to libvirt.xml(Check "virsh nwfilter-list")
# This nwfilter is necessary on the destination host.
# In addition, this method is creating filtering rule
# onto destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
return pre_live_migration_data
@wrap_exception()
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migrate_data):
"""Executing live migration.
:param context: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: implementation specific params
"""
# Create a local copy since we'll be modifying the dictionary
migrate_data = dict(migrate_data or {})
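        # For block migration, collect the source disk layout so it can be
        # passed to pre_live_migration on the destination host.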
try:
if block_migration:
disk = self.driver.get_instance_disk_info(instance['name'])
else:
disk = None
pre_migration_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
migrate_data['pre_live_migration_result'] = pre_migration_data
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Pre live migration failed at %s'),
dest, instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration, migrate_data)
# Executing live migration
        # live_migration might raise exceptions, but
        # nothing must be recovered in this version.
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance_ref,
dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
This method is called from live_migration
and mainly updating database record.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
required for live migration without shared storage
"""
LOG.info(_('_post_live_migration() is started..'),
instance=instance_ref)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance_ref['uuid'])
# Cleanup source host post live-migration
block_device_info = self._get_instance_volume_block_device_info(
ctxt, instance_ref, bdms)
self.driver.post_live_migration(ctxt, instance_ref, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance_ref)
for bdm in bdms:
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
if bdm.is_volume:
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance_ref)
self._notify_about_instance_usage(ctxt, instance_ref,
"live_migration._post.start",
network_info=network_info)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance_ref,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }
self.conductor_api.network_migrate_instance_start(ctxt,
instance_ref,
migration)
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance_ref, block_migration, dest)
        # No instance booting at source host, but the instance dir
        # must be deleted to prepare for the next block migration or
        # live migration without shared storage.
is_shared_storage = True
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
if block_migration or not is_shared_storage:
self.driver.cleanup(ctxt, instance_ref, network_info)
else:
# self.driver.destroy() usually performs vif unplugging
# but we must do it explicitly here when block_migration
# is false, as the network devices at the source must be
# torn down
try:
self.driver.unplug_vifs(instance_ref, network_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance_ref)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(ctxt, instance_ref,
self.host, teardown=True)
self.instance_events.clear_events_for_instance(instance_ref)
self._notify_about_instance_usage(ctxt, instance_ref,
"live_migration._post.end",
network_info=network_info)
LOG.info(_('Migrating instance to %s finished successfully.'),
dest, instance=instance_ref)
LOG.info(_("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance_ref)
if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
if CONF.cells.enable:
self.cells_rpcapi.consoleauth_delete_tokens(ctxt,
instance_ref['uuid'])
else:
self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,
instance_ref['uuid'])
@object_compat
@wrap_exception()
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
block_migration):
"""Post operations for live migration .
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info(_('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
migration = {'source_compute': instance['host'],
'dest_compute': self.host, }
self.conductor_api.network_migrate_instance_finish(context,
instance,
migration)
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.start",
network_info=network_info)
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.post_live_migration_at_destination(context, instance,
network_info,
block_migration, block_device_info)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
node_name = None
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node['hypervisor_hostname']
except exception.NotFound:
LOG.exception(_('Failed to get compute_info for %s') % self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.node = node_name
instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.end",
network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
dest, block_migration, migrate_data=None):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param block_migration: if true, prepare for block migration
:param migrate_data:
if not none, contains implementation specific data.
"""
host = instance['host']
instance = self._instance_update(context, instance['uuid'],
host=host, vm_state=vm_states.ACTIVE,
task_state=None, expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance, self.host)
for bdm in (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance['uuid'])):
if bdm.is_volume:
self.compute_rpcapi.remove_volume_connection(context, instance,
bdm.volume_id, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
# any empty images has to be deleted.
# Also Volume backed live migration w/o shared storage needs to delete
# newly created instance-xxx dir on the destination as a part of its
# rollback process
is_volume_backed = False
is_shared_storage = True
if migrate_data:
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_shared_storage = migrate_data.get('is_shared_storage', True)
if block_migration or (is_volume_backed and not is_shared_storage):
self.compute_rpcapi.rollback_live_migration_at_destination(context,
instance, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
@wrap_exception()
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance):
"""Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: an Instance dict sent over rpc
"""
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.start",
network_info=network_info)
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
self.driver.rollback_live_migration_at_destination(context, instance,
network_info, block_device_info)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
network_info=network_info)
@periodic_task.periodic_task
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
calling to the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
If anything errors don't fail, as it's possible the instance
has been deleted, etc.
"""
heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
curr_time = time.time()
if self._last_info_cache_heal + heal_interval > curr_time:
return
self._last_info_cache_heal = curr_time
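        # Heal one instance per call: pop a uuid from the cached list,
        # refilling the list from the database when it runs out, and skip any
        # instance that no longer lives on this host.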
instance_uuids = getattr(self, '_instance_uuids_to_heal', None)
instance = None
while not instance or instance['host'] != self.host:
if instance_uuids:
try:
instance = instance_obj.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
else:
# No more in our copy of uuids. Pull from the DB.
db_instances = instance_obj.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
if not db_instances:
# None.. just return.
return
instance = db_instances[0]
instance_uuids = [inst['uuid'] for inst in db_instances[1:]]
self._instance_uuids_to_heal = instance_uuids
# We have an instance now and it's ours
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self._get_instance_nw_info(context, instance, use_slave=True)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
LOG.debug(_("An error occurred"), exc_info=True)
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state': task_states.REBOOTING,
'host': self.host}
rebooting = instance_obj.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance['updated_at'],
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = self.conductor_api.instance_get_all_by_filters(
context, filters, columns_to_join=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance['launched_at'],
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.conductor_api.compute_unrescue(context, instance)
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window == 0:
return
mig_list_cls = migration_obj.MigrationList
migrations = mig_list_cls.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warn(_("Setting migration %(migration_id)s to error: "
"%(reason)s"),
{'migration_id': migration['id'], 'reason': reason},
**kwargs)
migration.status = 'error'
migration.save(context.elevated())
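        # Auto-confirm each expired migration unless its instance is missing,
        # in ERROR, or no longer sitting in the RESIZED state.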
for migration in migrations:
instance_uuid = migration.instance_uuid
LOG.info(_("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
{'migration_id': migration.id,
'instance_uuid': instance_uuid})
expected_attrs = ['metadata', 'system_metadata']
try:
instance = instance_obj.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
_set_migration_to_error(migration, reason)
continue
if instance['vm_state'] == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration, reason,
instance=instance)
continue
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
reason = (_("In states %(vm_state)s/%(task_state)s, not "
"RESIZED/None") %
{'vm_state': vm_state,
'task_state': task_state})
_set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance,
migration=migration)
except Exception as e:
LOG.error(_("Error auto-confirming resize: %s. "
"Will retry later.") % e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
filters = {'vm_state': vm_states.SHELVED,
'host': self.host}
shelved_instances = instance_obj.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save()
self.shelve_offload_instance(context, instance)
except Exception:
LOG.exception(_('Periodic task failed to offload instance.'),
instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
if not CONF.instance_usage_audit:
return
if compute_utils.has_audit_been_run(context,
self.conductor_api,
self.host):
return
begin, end = utils.last_completed_audit_period()
capi = self.conductor_api
instances = capi.instance_get_active_by_window_joined(
context, begin, end, host=self.host)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances."),
dict(host=self.host,
begin_time=begin,
end_time=end,
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, num_instances)
for instance in instances:
try:
self.conductor_api.notify_usage_exists(
context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_('Failed to generate usage '
'audit for instance '
'on host %s') % self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
self.conductor_api,
begin, end,
self.host, errors,
"Instance usage audit ran "
"for host %s, %s instances "
"in %s seconds." % (
self.host,
num_instances,
time.time() - start_time))
@periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
def _poll_bandwidth_usage(self, context):
if (CONF.bandwidth_poll_interval <= 0 or not self._bw_usage_supported):
return
prev_time, start_time = utils.last_completed_audit_period()
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_("Updating bandwidth usage cache"))
cells_update_interval = CONF.cells.bandwidth_update_interval
if (cells_update_interval > 0 and
curr_time - self._last_bw_usage_cell_update >
cells_update_interval):
self._last_bw_usage_cell_update = curr_time
update_cells = True
else:
update_cells = False
instances = instance_obj.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
# NOTE(PhilDay): Record that its not supported so we can
# skip fast on future calls rather than waste effort getting
# the list of instances.
LOG.warning(_("Bandwidth usage not supported by hypervisor."))
self._bw_usage_supported = False
return
refreshed = timeutils.utcnow()
for bw_ctr in bw_counters:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
bw_in = 0
bw_out = 0
last_ctr_in = None
last_ctr_out = None
# TODO(geekinutah): Once bw_usage_cache object is created
# need to revisit this and slaveify.
usage = self.conductor_api.bw_usage_get(context,
bw_ctr['uuid'],
start_time,
bw_ctr['mac_address'])
if usage:
bw_in = usage['bw_in']
bw_out = usage['bw_out']
last_ctr_in = usage['last_ctr_in']
last_ctr_out = usage['last_ctr_out']
else:
# TODO(geekinutah): Same here, pls slaveify
usage = self.conductor_api.bw_usage_get(
context, bw_ctr['uuid'], prev_time,
bw_ctr['mac_address'])
if usage:
last_ctr_in = usage['last_ctr_in']
last_ctr_out = usage['last_ctr_out']
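                # If the live counter is lower than the last recorded value
                # the counter wrapped (or the guest restarted), so count the
                # reading itself rather than the difference.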
if last_ctr_in is not None:
if bw_ctr['bw_in'] < last_ctr_in:
# counter rollover
bw_in += bw_ctr['bw_in']
else:
bw_in += (bw_ctr['bw_in'] - last_ctr_in)
if last_ctr_out is not None:
if bw_ctr['bw_out'] < last_ctr_out:
# counter rollover
bw_out += bw_ctr['bw_out']
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
self.conductor_api.bw_usage_update(context,
bw_ctr['uuid'],
bw_ctr['mac_address'],
start_time,
bw_in,
bw_out,
bw_ctr['bw_in'],
bw_ctr['bw_out'],
last_refreshed=refreshed,
update_cells=update_cells)
def _get_host_volume_bdms(self, context):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = instance_obj.InstanceList.get_by_host(context, self.host)
for instance in instances:
instance_bdms = [bdm for bdm in
(block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.conductor_api.vol_usage_update(context, usage['volume'],
usage['rd_req'],
usage['rd_bytes'],
usage['wr_req'],
usage['wr_bytes'],
usage['instance'])
@periodic_task.periodic_task
def _poll_volume_usage(self, context, start_time=None):
if CONF.volume_usage_poll_interval == 0:
return
if not start_time:
start_time = utils.last_completed_audit_period()[1]
curr_time = time.time()
if (curr_time - self._last_vol_usage_poll) < \
CONF.volume_usage_poll_interval:
return
self._last_vol_usage_poll = curr_time
compute_host_bdms = self._get_host_volume_bdms(context)
if not compute_host_bdms:
return
LOG.debug(_("Updating volume usage cache"))
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we make a DB call to get the number of
virtual machines known by the hypervisor and if the number matches the
number of virtual machines known by the database, we proceed in a lazy
loop, one database record at a time, checking if the hypervisor has the
same power state as is in the database.
"""
db_instances = instance_obj.InstanceList.get_by_host(context,
self.host,
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warn(_("Found %(num_db_instances)s in the database and "
"%(num_vm_instances)s on the hypervisor."),
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
for db_instance in db_instances:
if db_instance['task_state'] is not None:
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
continue
# No pending tasks. Now try to figure out the real vm_power_state.
try:
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance['state']
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore and move on to next instance.
continue
except Exception:
LOG.exception(_("Periodic sync_power_state task had an error "
"while processing an instance."),
instance=db_instance)
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
if self.host != db_instance.host:
            # On the sending end of nova-compute, _sync_power_state
            # may have yielded to the greenthread performing a live
            # migration; this in turn has changed the resident host
            # for the VM. However, the instance is still active; it
            # is just in the process of migrating to another host.
            # This implies that the compute source must relinquish
            # control to the compute destination.
LOG.info(_("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s") %
{'src': self.host,
'dst': db_instance.host},
instance=db_instance)
return
elif db_instance.task_state is not None:
            # On the receiving end of nova-compute, it could happen
            # that the DB instance already reports the new resident
            # host but the actual VM has not shown up on the
            # hypervisor yet. In this case, let's allow the loop to
            # continue and run the state sync in a later round.
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
return
if vm_power_state != db_power_state:
# power_state is always updated from hypervisor to db
db_instance.power_state = vm_power_state
db_instance.save()
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.PAUSED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warn(_("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
# because the user request via API calls, but also
# due to (temporary) external instrumentations.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warn(_("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
# not be found. Therefore just log the condition.
LOG.warn(_("Instance is unexpectedly not found. Ignore."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance is not stopped. Calling "
"the stop API."), instance=db_instance)
try:
# NOTE(russellb) Force the stop, because normally the
# compute API would not allow an attempt to stop a stopped
# instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warn(_("Instance is not (soft-)deleted."),
instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug(_("CONF.reclaim_instance_interval <= 0, skipping..."))
return
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = instance_obj.InstanceList.get_by_filters(
context, filters,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
LOG.info(_('Reclaiming deleted instance'), instance=instance)
# NOTE(comstud): Quotas were already accounted for when
# the instance was soft deleted, so there's no need to
# pass reservations here.
try:
self._delete_instance(context, instance, bdms)
except Exception as e:
LOG.warning(_("Periodic reclaim failed to delete "
"instance: %s"),
unicode(e), instance=instance)
@periodic_task.periodic_task
def update_available_resource(self, context):
"""See driver.get_available_resource()
        Periodic process that keeps the compute host's understanding of
resource availability and usage in sync with the underlying hypervisor.
:param context: security context
"""
new_resource_tracker_dict = {}
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
rt = self._get_resource_tracker(nodename)
rt.update_available_resource(context)
new_resource_tracker_dict[nodename] = rt
# Delete orphan compute node not reported by driver but still in db
compute_nodes_in_db = self._get_compute_nodes_in_db(context)
for cn in compute_nodes_in_db:
if cn.get('hypervisor_hostname') not in nodenames:
LOG.audit(_("Deleting orphan compute node %s") % cn['id'])
self.conductor_api.compute_node_delete(context, cn)
self._resource_tracker_dict = new_resource_tracker_dict
def _get_compute_nodes_in_db(self, context):
service_ref = self.conductor_api.service_get_by_compute_host(
context, self.host)
if not service_ref:
LOG.error(_("No service record for host %s"), self.host)
return []
return service_ref['compute_node']
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
bdms = (block_device_obj.BlockDeviceMappingList.
get_by_instance_uuid(context, instance.uuid))
if action == "log":
LOG.warning(_("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance['name'], instance=instance)
elif action == 'shutdown':
LOG.info(_("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance['name'], instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.warn(_("set_bootable is not implemented for "
"the current driver"))
# and power it off
self.driver.power_off(instance)
except Exception:
msg = _("Failed to power off instance")
LOG.warn(msg, instance=instance, exc_info=True)
elif action == 'reap':
LOG.info(_("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance['name'], instance=instance)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance['uuid'], bdms)
except Exception as e:
LOG.warning(_("Periodic cleanup failed to delete "
"instance: %s"),
unicode(e), instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance['deleted_at']
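        # NovaObject datetime fields are timezone-aware; drop the tzinfo so
        # the value can be compared by timeutils.is_older_than(), which works
        # with naive UTC timestamps.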
if isinstance(instance, obj_base.NovaObject) and deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance_uuid,
reservations=None,
instance_state=vm_states.ACTIVE):
try:
yield
except NotImplementedError as error:
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations)
LOG.info(_("Setting instance back to %(state)s after: "
"%(error)s") %
{'state': instance_state, 'error': error},
instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=instance_state,
task_state=None)
except exception.InstanceFaultRollback as error:
self._quota_rollback(context, reservations)
LOG.info(_("Setting instance back to ACTIVE after: %s"),
error, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid,
vm_state=vm_states.ACTIVE,
task_state=None)
raise error.inner_exception
except Exception as error:
LOG.exception(_('Setting instance vm_state to ERROR'),
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
self._quota_rollback(context, reservations)
self._set_instance_error_state(context, instance_uuid)
@aggregate_object_compat
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug(_('Hypervisor driver does not support '
'add_aggregate_host'))
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
self.conductor_api.aggregate_host_delete,
aggregate, host)
@aggregate_object_compat
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug(_('Hypervisor driver does not support '
'remove_aggregate_host'))
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
self.conductor_api.aggregate_host_add,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug(_('Processing event %(event)s'),
{'event': event.key, 'instance': instance})
_event.send(event)
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
if event.name == 'network-changed':
self.network_api.get_instance_nw_info(context, instance)
else:
self._process_instance_event(instance, event)
@periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
if CONF.image_cache_manager_interval == 0:
return
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
# Filter all_instances to only include those nodes which share this
# storage path.
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
filters = {'deleted': False,
'soft_deleted': True,
'host': nodes}
filtered_instances = instance_obj.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
"""Retry any pending instance file deletes."""
if CONF.instance_delete_interval == 0:
return
LOG.debug(_('Cleaning up deleted instances'))
filters = {'deleted': True,
'soft_deleted': False,
'host': CONF.host,
'cleaned': False}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = instance_obj.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs)
LOG.debug(_('There are %d instances to clean'), len(instances))
for instance in instances:
attempts = int(instance.system_metadata.get('clean_attempts', '0'))
LOG.debug(_('Instance has had %(attempts)s of %(max)s '
'cleanup attempts'),
{'attempts': attempts,
'max': CONF.maximum_instance_delete_attempts},
instance=instance)
if attempts < CONF.maximum_instance_delete_attempts:
success = self.driver.delete_instance_files(instance)
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
with utils.temporary_mutation(context, read_deleted='yes'):
instance.save(context)
|
apache-2.0
| -2,143,329,436,891,058,400
| 44.235739
| 79
| 0.561126
| false
| 4.692721
| false
| false
| false
|
agx/git-buildpackage
|
setup.py
|
1
|
3370
|
#!/usr/bin/python3
# vim: set fileencoding=utf-8 :
# Copyright (C) 2006-2011 Guido Günther <agx@sigxcpu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
# END OF COPYRIGHT #
import os
import re
from setuptools import setup, find_packages
VERSION_PY_PATH = 'gbp/version.py'
def _parse_changelog():
"""Get version from debian changelog and write it to gbp/version.py"""
with open("debian/changelog", encoding="utf-8") as f:
line = f.readline()
    # Parse the version from the changelog without external tooling so it
    # works on non-Debian systems.
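    # As a hypothetical example, a first changelog line such as
    #   git-buildpackage (0.9.22) unstable; urgency=medium
    # would yield "0.9.22" from the capture group below.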
m = re.match(".* \\(([0-9a-zA-Z.~\\-:+]+)\\) ", line)
if m:
return m.group(1)
raise ValueError('Could not parse version from debian/changelog')
def _save_version_py(version):
with open(VERSION_PY_PATH, 'w') as f:
f.write('"The current gbp version number"\n')
f.write('gbp_version = "%s"\n' % version)
def _load_version():
with open(VERSION_PY_PATH, 'r') as f:
version_py = f.read()
version_py_globals = {}
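    # Execute the generated version.py in an isolated namespace and read back
    # the gbp_version value it defines.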
exec(version_py, version_py_globals)
return version_py_globals['gbp_version']
def parse_and_fetch_version():
if os.path.exists('debian/changelog'):
version = _parse_changelog()
_save_version_py(version)
# we could return with the version here, but instead we check that
# the file has been properly written and it can be loaded back
version = _load_version()
return version
def readme():
with open('README.md') as file:
return file.read()
def setup_requires():
if os.getenv('WITHOUT_NOSETESTS'):
return []
else:
return ['nose>=0.11.1', 'coverage>=2.85', 'nosexcover>=1.0.7']
setup(name="gbp",
version=parse_and_fetch_version(),
author=u'Guido Günther',
author_email='agx@sigxcpu.org',
url='https://honk.sigxcpu.org/piki/projects/git-buildpackage/',
description='Suite to help with Debian packages in Git repositories',
license='GPLv2+',
long_description=readme(),
classifiers=[
'Environment :: Console',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Version Control',
'Operating System :: POSIX :: Linux',
],
scripts=['bin/git-pbuilder',
'bin/gbp-builder-mock'],
packages=find_packages(exclude=['tests', 'tests.*']),
data_files=[("share/git-buildpackage/", ["gbp.conf"]), ],
requires=['dateutil'],
install_requires=[
'python-dateutil',
],
setup_requires=setup_requires(),
python_requires='>=3.5',
entry_points={
'console_scripts': ['gbp=gbp.scripts.supercommand:supercommand'],
},
)
|
gpl-2.0
| 564,592,606,133,849,100
| 31.07619
| 75
| 0.635986
| false
| 3.645022
| false
| false
| false
|
rhazdon/django-sonic-screwdriver
|
django_sonic_screwdriver/apps/ban/migrations/0001_initial.py
|
1
|
2978
|
# Generated by Django 3.0.2 on 2020-01-23 13:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserBan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Model was created at this time.', verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Model was updated at this time.', verbose_name='Updated at')),
                ('end_date', models.DateTimeField(blank=True, help_text='The end date tells until when the ban is valid. If the end_date is empty, the ban is infinite.', null=True, verbose_name='End date of the ban')),
('banned_user', models.ForeignKey(help_text='This is the banned user or the receiver of the ban.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='banned_user', to=settings.AUTH_USER_MODEL, verbose_name='User')),
('creator', models.ForeignKey(help_text='This is the creator of the ban. If the creator is empty, the ban was created by the system.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'User Ban',
'verbose_name_plural': 'User Bans',
},
),
migrations.CreateModel(
name='IPBan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Model was created at this time.', verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Model was updated at this time.', verbose_name='Updated at')),
                ('end_date', models.DateTimeField(blank=True, help_text='The end date tells until when the ban is valid. If the end_date is empty, the ban is infinite.', null=True, verbose_name='End date of the ban')),
('ip', models.GenericIPAddressField(help_text='This is the banned ip. Every request from this IP will result in 403.', null=True, verbose_name='IP')),
('creator', models.ForeignKey(help_text='This is the creator of the ban. If the creator is empty, the ban was created by the system.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'IP Ban',
'verbose_name_plural': 'IP Bans',
},
),
]
|
mit
| -282,668,870,110,881,380
| 62.361702
| 258
| 0.629953
| false
| 4.024324
| false
| false
| false
|
cuckoobox/cuckoo
|
stuff/distributed/cluster-test.py
|
1
|
3084
|
# Copyright (C) 2017-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import click
import requests
import time
class Script(object):
def __init__(self):
self.name = self.__class__.__name__.lower()
self.filename = "%s.py" % self.name
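        # The subclass docstring doubles as the Python source that gets
        # submitted to the sandbox for analysis.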
self.source = self.__doc__
def check(self):
pass
class Internet(Script):
r"""
import socket
s = socket.create_connection(("google.com", 80))
s.send("GET / HTTP/1.0\r\nHost: google.com\r\n\r\n")
s.recv(0x10000)
"""
def check(self, report):
for dns in report.get("network", {}).get("dns", []):
if dns["request"] == "google.com" and dns["answers"]:
return True
return False
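# Hypothetical invocation (host and port are placeholders):
#   python cluster-test.py 10.0.0.5 9003 --script internet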
@click.command()
@click.argument("host")
@click.argument("port", default=9003, required=False)
@click.option("-s", "--script", default="internet")
def main(host, port, script):
for cls in Script.__subclasses__():
if cls().name == script:
script = cls()
break
else:
print "Unknown script:", script
exit(1)
owner = "cluster.test.%d" % int(time.time())
url = "http://%s:%s" % (host, port)
r = requests.get("%s/api/node" % url).json()
machines = []
for name, info in r["nodes"].items():
if not info["enabled"]:
continue
print "indexing..", name
info = requests.post("%s/api/node/%s/refresh" % (url, name)).json()
for vm in info["machines"]:
machines.append((name, vm["name"]))
tasks = {}
for node, vmname in machines:
r = requests.post("%s/api/task" % url, files={
"file": (script.filename, script.source),
}, data={
"node": node,
"machine": vmname,
"options": "json.calls=0",
"priority": 5,
"owner": owner,
})
tasks[r.json()["task_id"]] = node, vmname
print "submitted..", node, vmname, r.json()["task_id"]
status = []
while tasks:
r = requests.get("%s/api/task" % url, params={
"status": "finished",
"owner": owner,
})
assert r.status_code == 200
for task in r.json()["tasks"].values():
r = requests.get("%s/api/report/%d" % (url, task["id"]))
if task["id"] not in tasks:
continue
node, vmname = tasks.pop(task["id"])
ret = script.check(r.json())
status.append((node, vmname, task["id"], ret))
print "finished..", status[-1], "report.length=%d" % len(r.text)
if not ret:
print "^-- incorrect return value!"
else:
requests.delete("%s/api/task/%d" % (url, task["id"]))
counts = {}
for node, _ in tasks.values():
counts[node] = counts.get(node, 0) + 1
print "left:", " ".join("%s=%s" % (k, v) for k, v in counts.items())
time.sleep(3)
if __name__ == "__main__":
main()
|
mit
| -8,013,044,425,276,418,000
| 29.534653
| 76
| 0.519455
| false
| 3.5986
| false
| false
| false
|
Illumina/HapMix
|
scripts/haplobams/split_by_haplotype_lib.py
|
1
|
11911
|
import sys
sys.path.append('/opt/rh/python27/root/usr/lib64/python2.7/site-packages/pysam')
sys.path.append('/home/ptedder/local/lib/python2.7/site-packages')
sys.path.append('/home/ptedder/local/lib64/python2.7/site-packages')
print sys.path
import random,pysam,re,subprocess,HTSeq,pdb,argparse
from collections import defaultdict
parser = argparse.ArgumentParser()
parser.add_argument('-s','--sample_name',help="please give sample name e.g. NA12878",required=True)
parser.add_argument('-c','--chr_no',help="please give chr_no",required=True)
parser.add_argument('-b','--bam_file',help="please specify bam file",required=True)
parser.add_argument('-r','--ref_file',help="please specify reference directory",required=True)
parser.add_argument('-n','--snp_file',help="please specify tabixed haplotyped SNP file",required=True)
parser.add_argument('-i','--indel_file',help="please specify tabixed haplotyped indel file",required=True)
parser.add_argument('-x','--chr_prefix',help="does the chromsome need a prefix eg chr",required=False)
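# Hypothetical invocation (sample name and paths are placeholders):
#   python split_by_haplotype_lib.py -s NA12878 -c 21 -b sample.bam \
#       -r chr21.fa -n phased_snps.vcf.gz -i phased_indels.vcf.gz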
args = parser.parse_args()
sample_name=args.sample_name
chr_no=args.chr_no
bam_file=args.bam_file
ref_file=args.ref_file
print "chr_no ", chr_no
snp_file=args.snp_file
indel_file=args.indel_file
if (args.chr_prefix):
chr= args.chr_prefix+str(chr_no)
else:
chr=str(chr_no)
sequence={}
for s in HTSeq.FastaReader(ref_file):
sequence[s.name]=s
reference_seq=sequence["chr"+str(chr_no)]
pos_ref=0
samfile = pysam.Samfile(bam_file,"rb")
haplotyped_snp_file=subprocess.Popen(['tabix',snp_file,chr_no ],stdout=subprocess.PIPE)
haplotyped_indel_file=subprocess.Popen(['tabix',indel_file,chr_no ],stdout=subprocess.PIPE)
#d={'hc':0,'hd':0,'bt':0,'ot':0,'rf':0,'fr':0}
haplotypeC_bam= pysam.Samfile("haplotypeC_"+chr +".bam", "wb", template=samfile)
haplotypeD_bam= pysam.Samfile("haplotypeD_"+chr+".bam", "wb", template=samfile)
haplotype_count={}
def main():
read_variant_dict={}
paired_read={}
(haplotype_dict_snvs,haplotype_dict_snvs_pos)=read_in_vcf(haplotyped_snp_file)
(haplotype_dict_indels,haplotype_dict_indels_pos)=read_in_vcf(haplotyped_indel_file)
chr_variant_dict={}
chr_variant_dict['haplotypeC']=dict(haplotype_dict_snvs['haplotypeC'].items()+haplotype_dict_indels['haplotypeC'].items())
chr_variant_dict['haplotypeD']=dict(haplotype_dict_snvs['haplotypeD'].items()+haplotype_dict_indels['haplotypeD'].items())
haplotype_dict_pos=dict(haplotype_dict_snvs_pos.items()+haplotype_dict_indels_pos.items())
for read_line in samfile.fetch(chr):
if read_line.cigar == None:
continue #SKIPPING READ AS UNMAPPED
if not read_line.qname in read_variant_dict:
read_variant_dict[read_line.qname]={}
rvd=variant_count(read_line,haplotype_dict_pos)
read_variant_dict[read_line.qname].update(rvd) #HYPOTHETICAL BUG IF INDEL AND SNP AT SAME POS
if not read_line.qname in haplotype_count:
haplotype_count[read_line.qname]={'other':{},'C':{},'D':{}}
#COUNT NUMBER OF MUTATIONS FOR EACH READ WHICH CAN BE ASSIGNED TO EACH HAPLOTYPE
for variant_pos in read_variant_dict[read_line.qname].keys():
if variant_pos in chr_variant_dict['haplotypeC'] and variant_pos in chr_variant_dict['haplotypeD'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeC'][variant_pos]['alt'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeD'][variant_pos]['alt']: #check hom/het and call:
haplotype_count[read_line.qname]['C'][variant_pos]={}
haplotype_count[read_line.qname]['D'][variant_pos]={}
elif variant_pos in chr_variant_dict['haplotypeC'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeC'][variant_pos]['alt']:
haplotype_count[read_line.qname]['C'][variant_pos]={'call':read_variant_dict[read_line.qname][variant_pos]['call']}
elif variant_pos in chr_variant_dict['haplotypeD'] and read_variant_dict[read_line.qname][variant_pos]['call']== chr_variant_dict['haplotypeD'][variant_pos]['alt']:
haplotype_count[read_line.qname]['D'][variant_pos]={}
else:
haplotype_count[read_line.qname]['other'][variant_pos]={}
# IS IT THE SECOND/ORPHAN READ? CAN THE READ BE ASSIGNED UNIQUELY TO EITHER OF THE HAPLOTYPES?
if not read_line.is_proper_pair or (read_line.pnext in paired_read and read_line.qname in paired_read[read_line.pnext]) :
haplotype=assign_to_haplotype(haplotype_count,paired_read,read_line)
write_to_bam_file(haplotype,paired_read,read_line)
haplotype_count.pop(read_line.qname, None)
read_variant_dict.pop(read_line.qname, None)
# IS IT THE FIRST READ? ADD TO DICT
if read_line.is_proper_pair and not read_line.pnext in paired_read:
if not read_line.pos in paired_read:
paired_read[read_line.pos]={}
if not read_line.qname in paired_read[read_line.pos]:
paired_read[read_line.pos][read_line.qname]=read_line
#FLUSH DICTIONARIES EVERY 10k bp
if not read_line.pos % 1e4:
tmpkeys=paired_read.keys()
for pos in tmpkeys:
if pos<read_line.pos:
paired_read.pop(pos, None)
def read_in_vcf(vcf_file):
cd={'haplotypeC':{},'haplotypeD':{}}
csdl={}
for line in vcf_file.stdout:
if re.match('#',line):
continue
if not re.search('bwa',line) and not re.search('isaac',line): # ONLY TRUST ISAAC & BWA BASED CALLS
continue
else:
(chrom,pos,id,ref,alt,qual,filter,info,format,NA12877,NA12878,NA12879,NA12880,NA12881,NA12882,NA12883,NA12884,NA12885,NA12886,NA12887,NA12888,NA12889,NA12890,NA12891,NA12892,NA12893)=line.strip().split('\t')
if re.match('chr',chr) and not re.match('chr',chrom):
chrom='chr'+chrom
if chrom != chr:
continue
pos=int(float(pos))
format_columns=format.split(':') #JUST GENOTYPE AND EDIT DISTANCE
format_columns_data=eval(sample_name).split(':')
f_dict={}
for i,k in enumerate(format_columns):
f_dict[k]=format_columns_data[i]
if 'GT' in f_dict:
if re.search("/",f_dict['GT']):
continue
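                # Phased genotypes such as '0|1' assign the first allele to
                # haplotype C and the second to haplotype D; unphased calls
                # (using '/') were skipped above.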
(ploidyC,ploidyD)=f_dict['GT'].split('|')
(ploidyC,ploidyD)=(int(ploidyC),int(ploidyD))
ploidyC_base_call=''
ploidyD_base_call=''
if ploidyC ==0 and ploidyD ==0:
continue # not haplotyped so skip
if ploidyC ==0:
ploidyC_base_call=ref
elif ploidyC ==1:
ploidyC_base_call=alt
if ploidyD ==0:
ploidyD_base_call=ref
elif ploidyD ==1:
ploidyD_base_call=alt
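            # Classify the variant by comparing ref/alt lengths: SNV,
            # multi-base substitution, deletion or insertion.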
if len(ref)==len(alt)==1:
type='S'
if len(ref)==len(alt)!=1:
type='SUB'
if len(ref)>len(alt):
type='D'
if len(ref)<len(alt):
type='I'
cd['haplotypeC'][pos]={'pos':pos,'alt':ploidyC_base_call}
cd['haplotypeD'][pos]={'pos':pos,'alt':ploidyD_base_call}
csdl[pos]={'ref':ref,'alt':alt,'type':type}
else:
sys.exit("no genotyping on line")
return(cd,csdl)
def variant_count(read_line,haplotype_dict_pos):
pos_in_read=0
pos_ref=read_line.pos
read_variant_dict={}
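    # Walk the CIGAR operations, tracking the current position both within
    # the read and on the reference, and record base calls at haplotyped
    # positions.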
for cigar_operations in read_line.cigar:
(type_cigar_op,length_cigar_op)=cigar_operations
if type_cigar_op==0 or type_cigar_op==7: #MATCH
ref_pos=pos_ref
for ii in range(0,length_cigar_op):
chr='chr'+str(read_line.tid)
ref_base=reference_seq.seq[ref_pos].upper()
pos_ref+=1
if pos_ref in haplotype_dict_pos: # IF ITS A HAPLOTYPED READ
if haplotype_dict_pos[pos_ref]['type']=='S':
read_variant_dict[pos_ref]={'type':haplotype_dict_pos[pos_ref]['type'],'call':read_line.seq[pos_in_read],'ref':ref_base}
if haplotype_dict_pos[pos_ref]['type']=='D':
ref_del=reference_seq.seq[pos_ref-1:pos_ref+length_cigar_op].upper()
read_variant_dict[pos_ref]={'type':'D','alt':haplotype_dict_pos[pos_ref]['alt'],'call':haplotype_dict_pos[pos_ref]['ref'],'ln':len(haplotype_dict_pos[pos_ref]['alt'])} # deletions vcf ref will be longer than alt
if haplotype_dict_pos[pos_ref]['type']=='I':
read_variant_dict[pos_ref]={'type':'I','alt':haplotype_dict_pos[pos_ref]['alt'],'call':haplotype_dict_pos[pos_ref]['ref']} # for indels this has to be base before as well
pos_in_read+=1
elif type_cigar_op==3 : #N
pos_in_read+=length_cigar_op
pos_ref+=length_cigar_op
elif type_cigar_op==4: # SOFT CLIP
pos_in_read+=length_cigar_op #BAM FILE START POS IS AFTER SOFT CLIPPING
elif type_cigar_op==1 :# INSERTION
if pos_ref in haplotype_dict_pos:
read_variant_dict[pos_ref]={'type':'I','call':read_line.seq[pos_in_read-1:pos_in_read+length_cigar_op],'ref':read_line.seq[pos_in_read-1]} # for indels this has to be base before as well
pos_in_read+=length_cigar_op
pos_ref+=1
elif type_cigar_op==2 :# DELETION
if pos_ref in haplotype_dict_pos:
ref_del=reference_seq.seq[pos_ref-1:pos_ref+length_cigar_op].upper()
read_variant_dict[pos_ref]={'type':'D','call':read_line.seq[pos_in_read-1],'alt':read_line.seq[pos_in_read-1],'ref':ref_del,'ln':length_cigar_op} # deletions vcf ref will be longer than alt
pos_ref+=length_cigar_op
return read_variant_dict
def write_to_bam_file(haplotype,paired_read,read_line):
if haplotype =='haplotypeC':
haplotypeC_bam.write(read_line)
elif haplotype =='haplotypeD':
haplotypeD_bam.write(read_line)
if read_line.is_proper_pair:
other_read=paired_read[read_line.pnext][read_line.qname]
if haplotype =='haplotypeC':
haplotypeC_bam.write(other_read)
elif haplotype =='haplotypeD':
haplotypeD_bam.write(other_read)
def assign_to_haplotype(haplotype_count,paired_read,read_line):
if len(haplotype_count[read_line.qname]['C']) != 0 and len(haplotype_count[read_line.qname]['D']) == 0 :
haplotype='haplotypeC'
if len(haplotype_count[read_line.qname]['C']) == 0 and len(haplotype_count[read_line.qname]['D']) != 0 :
haplotype='haplotypeD'
elif len(haplotype_count[read_line.qname]['C']) != 0 and len(haplotype_count[read_line.qname]['D']) != 0 :
if random.random()<0.5:
haplotype='haplotypeC'
else:
haplotype='haplotypeD'
elif len(haplotype_count[read_line.qname]['C']) == 0 and len(haplotype_count[read_line.qname]['D']) == 0 and len(haplotype_count[read_line.qname]['other']) != 0:
if random.random()<0.5:
haplotype='haplotypeC'
else:
haplotype='haplotypeD'
elif len(haplotype_count[read_line.qname]['C']) == 0 and len(haplotype_count[read_line.qname]['D']) == 0 and len(haplotype_count[read_line.qname]['other']) == 0:
if random.random() <0.5:
haplotype='haplotypeC'
else:
haplotype='haplotypeD'
return haplotype
if __name__ == "__main__":
main()
|
gpl-3.0
| -1,688,665,998,435,795,000
| 43.114815
| 363
| 0.608513
| false
| 2.980731
| false
| false
| false
|
Runscope/pysaml2
|
src/saml2/pack.py
|
1
|
8806
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""Contains classes and functions that are necessary to implement
different bindings.
Bindings normally consist of three parts:
- rules about what to send
- how to package the information
- which protocol to use
"""
import urlparse
import saml2
import base64
import urllib
from saml2.s_utils import deflate_and_base64_encode
from saml2.s_utils import Unsupported
import logging
from saml2.sigver import REQ_ORDER
from saml2.sigver import RESP_ORDER
from saml2.sigver import SIGNER_ALGS
logger = logging.getLogger(__name__)
try:
from xml.etree import cElementTree as ElementTree
if ElementTree.VERSION < '1.3.0':
# cElementTree has no support for register_namespace
        # nor _namespace_map, thus we sacrifice performance
        # for correctness
from xml.etree import ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
NAMESPACE = "http://schemas.xmlsoap.org/soap/envelope/"
FORM_SPEC = """<form method="post" action="%s">
<input type="hidden" name="%s" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
<input type="submit" value="Submit" />
</form>"""
def http_form_post_message(message, location, relay_state="",
typ="SAMLRequest"):
"""The HTTP POST binding defines a mechanism by which SAML protocol
    messages may be transmitted within the base64-encoded content of an
HTML form control.
:param message: The message
:param location: Where the form should be posted to
:param relay_state: for preserving and conveying state information
    :return: A dictionary containing header information and an HTML message.
"""
response = ["<head>", """<title>SAML 2.0 POST</title>""", "</head><body>"]
if not isinstance(message, basestring):
message = "%s" % (message,)
if typ == "SAMLRequest" or typ == "SAMLResponse":
_msg = base64.b64encode(message)
else:
_msg = message
response.append(FORM_SPEC % (location, typ, _msg, relay_state))
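    # The inline script below auto-submits the form on page load, so the user
    # agent immediately POSTs the SAML message to the given location.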
response.append("""<script type="text/javascript">""")
response.append(" window.onload = function ()")
response.append(" { document.forms[0].submit(); }")
response.append("""</script>""")
response.append("</body>")
return {"headers": [("Content-type", "text/html")], "data": response}
def http_redirect_message(message, location, relay_state="", typ="SAMLRequest",
sigalg=None, key=None):
"""The HTTP Redirect binding defines a mechanism by which SAML protocol
messages can be transmitted within URL parameters.
Messages are encoded for use with this binding using a URL encoding
technique, and transmitted using the HTTP GET method.
The DEFLATE Encoding is used in this function.
:param message: The message
:param location: Where the message should be posted to
:param relay_state: for preserving and conveying state information
:param typ: What type of message it is SAMLRequest/SAMLResponse/SAMLart
:param sigalg: The signature algorithm to use.
:param key: Key to use for signing
    :return: A dictionary containing header information and an HTML message.
"""
if not isinstance(message, basestring):
message = "%s" % (message,)
_order = None
if typ in ["SAMLRequest", "SAMLResponse"]:
if typ == "SAMLRequest":
_order = REQ_ORDER
else:
_order = RESP_ORDER
args = {typ: deflate_and_base64_encode(message)}
elif typ == "SAMLart":
args = {typ: message}
else:
raise Exception("Unknown message type: %s" % typ)
if relay_state:
args["RelayState"] = relay_state
if sigalg:
# sigalgs
# http://www.w3.org/2000/09/xmldsig#dsa-sha1
# http://www.w3.org/2000/09/xmldsig#rsa-sha1
args["SigAlg"] = sigalg
try:
signer = SIGNER_ALGS[sigalg]
except:
raise Unsupported("Signing algorithm")
else:
string = "&".join([urllib.urlencode({k: args[k]}) for k in _order if k in args])
args["Signature"] = base64.b64encode(signer.sign(string, key))
string = urllib.urlencode(args)
else:
string = urllib.urlencode(args)
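    # Append the encoded parameters to the location, using '&' when the URL
    # already carries a query string and '?' otherwise.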
glue_char = "&" if urlparse.urlparse(location).query else "?"
login_url = glue_char.join([location, string])
headers = [('Location', str(login_url))]
body = []
return {"headers": headers, "data": body}
DUMMY_NAMESPACE = "http://example.org/"
PREFIX = '<?xml version="1.0" encoding="UTF-8"?>'
def make_soap_enveloped_saml_thingy(thingy, header_parts=None):
""" Returns a soap envelope containing a SAML request
as a text string.
:param thingy: The SAML thingy
:return: The SOAP envelope as a string
"""
envelope = ElementTree.Element('')
envelope.tag = '{%s}Envelope' % NAMESPACE
if header_parts:
header = ElementTree.Element('')
header.tag = '{%s}Header' % NAMESPACE
envelope.append(header)
for part in header_parts:
# This doesn't work if the headers are signed
part.become_child_element_of(header)
body = ElementTree.Element('')
body.tag = '{%s}Body' % NAMESPACE
envelope.append(body)
if isinstance(thingy, basestring):
# remove the first XML version/encoding line
logger.debug("thingy0: %s" % thingy)
_part = thingy.split("\n")
thingy = "".join(_part[1:])
thingy = thingy.replace(PREFIX, "")
logger.debug("thingy: %s" % thingy)
_child = ElementTree.Element('')
_child.tag = '{%s}FuddleMuddle' % DUMMY_NAMESPACE
body.append(_child)
_str = ElementTree.tostring(envelope, encoding="UTF-8")
logger.debug("SOAP precursor: %s" % _str)
    # find and remove the namespace definition
i = _str.find(DUMMY_NAMESPACE)
j = _str.rfind("xmlns:", 0, i)
cut1 = _str[j:i + len(DUMMY_NAMESPACE) + 1]
_str = _str.replace(cut1, "")
first = _str.find("<%s:FuddleMuddle" % (cut1[6:9],))
last = _str.find(">", first + 14)
cut2 = _str[first:last + 1]
return _str.replace(cut2, thingy)
else:
thingy.become_child_element_of(body)
return ElementTree.tostring(envelope, encoding="UTF-8")
def http_soap_message(message):
return {"headers": [("Content-type", "application/soap+xml")],
"data": make_soap_enveloped_saml_thingy(message)}
def http_paos(message, extra=None):
return {"headers": [("Content-type", "application/soap+xml")],
"data": make_soap_enveloped_saml_thingy(message, extra)}
def parse_soap_enveloped_saml(text, body_class, header_class=None):
"""Parses a SOAP enveloped SAML thing and returns header parts and body
:param text: The SOAP object as XML
:return: header parts and body as saml.samlbase instances
"""
envelope = ElementTree.fromstring(text)
assert envelope.tag == '{%s}Envelope' % NAMESPACE
#print len(envelope)
body = None
header = {}
for part in envelope:
#print ">",part.tag
if part.tag == '{%s}Body' % NAMESPACE:
for sub in part:
try:
body = saml2.create_class_from_element_tree(body_class, sub)
except Exception:
raise Exception(
"Wrong body type (%s) in SOAP envelope" % sub.tag)
elif part.tag == '{%s}Header' % NAMESPACE:
if not header_class:
raise Exception("Header where I didn't expect one")
#print "--- HEADER ---"
for sub in part:
#print ">>",sub.tag
for klass in header_class:
#print "?{%s}%s" % (klass.c_namespace,klass.c_tag)
if sub.tag == "{%s}%s" % (klass.c_namespace, klass.c_tag):
header[sub.tag] = \
saml2.create_class_from_element_tree(klass, sub)
break
return body, header
# -----------------------------------------------------------------------------
PACKING = {
saml2.BINDING_HTTP_REDIRECT: http_redirect_message,
saml2.BINDING_HTTP_POST: http_form_post_message,
}
def packager(identifier):
try:
return PACKING[identifier]
except KeyError:
raise Exception("Unkown binding type: %s" % identifier)
def factory(binding, message, location, relay_state="", typ="SAMLRequest"):
return PACKING[binding](message, location, relay_state, typ)
|
bsd-2-clause
| -1,560,756,759,072,883,200
| 33.533333
| 92
| 0.607086
| false
| 3.920748
| false
| false
| false
|
mosra/magnum-examples
|
src/python/magnum-primitives-scenegraph.py
|
1
|
4843
|
#!/usr/bin/env python3
#
# This file is part of Magnum.
#
# Original authors — credit is appreciated but not required:
#
# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021
# — Vladimír Vondruš <mosra@centrum.cz>
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or distribute
# this software, either in source code form or as a compiled binary, for any
# purpose, commercial or non-commercial, and by any means.
#
# In jurisdictions that recognize copyright laws, the author or authors of
# this software dedicate any and all copyright interest in the software to
# the public domain. We make this dedication for the benefit of the public
# at large and to the detriment of our heirs and successors. We intend this
# dedication to be an overt act of relinquishment in perpetuity of all
# present and future rights to this software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from magnum import *
from magnum import gl, meshtools, primitives, scenegraph, shaders
from magnum.platform.sdl2 import Application
from magnum.scenegraph.matrix import Scene3D, Object3D
class CubeDrawable(scenegraph.Drawable3D):
def __init__(self, object: Object3D, drawables: scenegraph.DrawableGroup3D,
mesh: gl.Mesh, shader: shaders.PhongGL, color: Color4):
scenegraph.Drawable3D.__init__(self, object, drawables)
self._mesh = mesh
self._shader = shader
self.color = color # Settable from outside
def draw(self, transformation_matrix: Matrix4, camera: scenegraph.Camera3D):
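        # Transform the fixed world-space light position into camera space
        # each frame so the lighting stays consistent as the camera moves.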
self._shader.light_positions = [
Vector4(camera.camera_matrix.transform_point((-3.0, 5.0, 10.0)), 0.0)
]
self._shader.light_colors = [Color3(1.0)]
self._shader.diffuse_color = self.color
self._shader.ambient_color = Color3.from_hsv(self.color.hue(), 1.0, 0.3)
self._shader.transformation_matrix = transformation_matrix
self._shader.normal_matrix = transformation_matrix.rotation_scaling()
self._shader.projection_matrix = camera.projection_matrix
self._shader.draw(self._mesh)
class PrimitivesSceneGraphExample(Application):
def __init__(self):
configuration = self.Configuration()
configuration.title = "Magnum Python Primitives + SceneGraph Example"
Application.__init__(self, configuration)
gl.Renderer.enable(gl.Renderer.Feature.DEPTH_TEST)
gl.Renderer.enable(gl.Renderer.Feature.FACE_CULLING)
# Scene and drawables
self._scene = Scene3D()
self._drawables = scenegraph.DrawableGroup3D()
# Camera setup
camera_object = Object3D(parent=self._scene)
camera_object.translate(Vector3.z_axis(10.0))
self._camera = scenegraph.Camera3D(camera_object)
self._camera.projection_matrix = Matrix4.perspective_projection(
fov=Deg(35.0), aspect_ratio=1.33333, near=0.01, far=100.0)
# Cube object and drawable
self._cube = Object3D(parent=self._scene)
self._cube.rotate_y(Deg(40.0))
self._cube.rotate_x(Deg(30.0))
self._cube_drawable = CubeDrawable(self._cube, self._drawables,
meshtools.compile(primitives.cube_solid()), shaders.PhongGL(),
Color3.from_hsv(Deg(35.0), 1.0, 1.0))
self._previous_mouse_position = Vector2i()
def draw_event(self):
gl.default_framebuffer.clear(gl.FramebufferClear.COLOR|
gl.FramebufferClear.DEPTH)
self._camera.draw(self._drawables)
self.swap_buffers()
def mouse_release_event(self, event: Application.MouseEvent):
self._cube_drawable.color = Color3.from_hsv(
self._cube_drawable.color.hue() + Deg(50.0), 1.0, 1.0)
self.redraw()
def mouse_move_event(self, event: Application.MouseMoveEvent):
if event.buttons & self.MouseMoveEvent.Buttons.LEFT:
delta = 1.0*(
Vector2(event.position - self._previous_mouse_position)/
Vector2(self.window_size))
self._cube.rotate_y_local(Rad(delta.x))
self._cube.rotate_x(Rad(delta.y))
self.redraw()
self._previous_mouse_position = event.position
exit(PrimitivesSceneGraphExample().exec())
|
unlicense
| 7,908,722,276,422,597,000
| 42.576577
| 81
| 0.674178
| false
| 3.604322
| false
| false
| false
|
jeremiahyan/odoo
|
addons/account/models/chart_template.py
|
1
|
74310
|
# -*- coding: utf-8 -*-
from odoo.exceptions import AccessError
from odoo import api, fields, models, _
from odoo import SUPERUSER_ID
from odoo.exceptions import UserError, ValidationError
from odoo.http import request
from odoo.addons.account.models.account_tax import TYPE_TAX_USE
import logging
_logger = logging.getLogger(__name__)
def migrate_set_tags_and_taxes_updatable(cr, registry, module):
''' This is a utility function used to manually set the flag noupdate to False on tags and account tax templates on localization modules
that need migration (for example in case of VAT report improvements)
'''
env = api.Environment(cr, SUPERUSER_ID, {})
xml_record_ids = env['ir.model.data'].search([
('model', 'in', ['account.tax.template', 'account.account.tag']),
('module', 'like', module)
]).ids
if xml_record_ids:
cr.execute("update ir_model_data set noupdate = 'f' where id in %s", (tuple(xml_record_ids),))
def preserve_existing_tags_on_taxes(cr, registry, module):
    ''' This is a utility function used to preserve previously existing tags during upgrade of the module.'''
env = api.Environment(cr, SUPERUSER_ID, {})
xml_records = env['ir.model.data'].search([('model', '=', 'account.account.tag'), ('module', 'like', module)])
if xml_records:
cr.execute("update ir_model_data set noupdate = 't' where id in %s", [tuple(xml_records.ids)])
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class AccountGroupTemplate(models.Model):
_name = "account.group.template"
_description = 'Template for Account Groups'
_order = 'code_prefix_start'
parent_id = fields.Many2one('account.group.template', index=True, ondelete='cascade')
name = fields.Char(required=True)
code_prefix_start = fields.Char()
code_prefix_end = fields.Char()
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
class AccountAccountTemplate(models.Model):
_name = "account.account.template"
_inherit = ['mail.thread']
_description = 'Templates for Accounts'
_order = "code"
name = fields.Char(required=True, index=True)
currency_id = fields.Many2one('res.currency', string='Account Currency', help="Forces all moves for this account to have this secondary currency.")
code = fields.Char(size=64, required=True, index=True)
user_type_id = fields.Many2one('account.account.type', string='Type', required=True,
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities.")
reconcile = fields.Boolean(string='Allow Invoices & payments Matching', default=False,
help="Check this option if you want the user to reconcile entries in this account.")
note = fields.Text()
tax_ids = fields.Many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', string='Default Taxes')
nocreate = fields.Boolean(string='Optional Create', default=False,
help="If checked, the new chart of accounts will not contain this by default.")
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template',
help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you "
"to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times).")
tag_ids = fields.Many2many('account.account.tag', 'account_account_template_account_tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
@api.depends('name', 'code')
def name_get(self):
res = []
for record in self:
name = record.name
if record.code:
name = record.code + ' ' + name
res.append((record.id, name))
return res
class AccountChartTemplate(models.Model):
_name = "account.chart.template"
_description = "Account Chart Template"
name = fields.Char(required=True)
parent_id = fields.Many2one('account.chart.template', string='Parent Chart Template')
code_digits = fields.Integer(string='# of Digits', required=True, default=6, help="No. of Digits to use for account code")
visible = fields.Boolean(string='Can be Visible?', default=True,
help="Set this to False if you don't want this template to be used actively in the wizard that generate Chart of Accounts from "
"templates, this is useful when you want to generate accounts of this template only when loading its child template.")
currency_id = fields.Many2one('res.currency', string='Currency', required=True)
use_anglo_saxon = fields.Boolean(string="Use Anglo-Saxon accounting", default=False)
complete_tax_set = fields.Boolean(string='Complete Set of Taxes', default=True,
help="This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from list "
"of taxes. This last choice assumes that the set of tax defined on this template is complete")
account_ids = fields.One2many('account.account.template', 'chart_template_id', string='Associated Account Templates')
tax_template_ids = fields.One2many('account.tax.template', 'chart_template_id', string='Tax Template List',
help='List of all the taxes that have to be installed by the wizard')
bank_account_code_prefix = fields.Char(string='Prefix of the bank accounts', required=True)
cash_account_code_prefix = fields.Char(string='Prefix of the main cash accounts', required=True)
transfer_account_code_prefix = fields.Char(string='Prefix of the main transfer accounts', required=True)
income_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Gain Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
expense_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Loss Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
country_id = fields.Many2one(string="Country", comodel_name='res.country', help="The country this chart of accounts belongs to. None if it's generic.")
account_journal_suspense_account_id = fields.Many2one('account.account.template', string='Journal Suspense Account')
account_journal_payment_debit_account_id = fields.Many2one('account.account.template', string='Journal Outstanding Receipts Account')
account_journal_payment_credit_account_id = fields.Many2one('account.account.template', string='Journal Outstanding Payments Account')
default_cash_difference_income_account_id = fields.Many2one('account.account.template', string="Cash Difference Income Account")
default_cash_difference_expense_account_id = fields.Many2one('account.account.template', string="Cash Difference Expense Account")
default_pos_receivable_account_id = fields.Many2one('account.account.template', string="PoS receivable account")
property_account_receivable_id = fields.Many2one('account.account.template', string='Receivable Account')
property_account_payable_id = fields.Many2one('account.account.template', string='Payable Account')
property_account_expense_categ_id = fields.Many2one('account.account.template', string='Category of Expense Account')
property_account_income_categ_id = fields.Many2one('account.account.template', string='Category of Income Account')
property_account_expense_id = fields.Many2one('account.account.template', string='Expense Account on Product Template')
property_account_income_id = fields.Many2one('account.account.template', string='Income Account on Product Template')
property_stock_account_input_categ_id = fields.Many2one('account.account.template', string="Input Account for Stock Valuation")
property_stock_account_output_categ_id = fields.Many2one('account.account.template', string="Output Account for Stock Valuation")
property_stock_valuation_account_id = fields.Many2one('account.account.template', string="Account Template for Stock Valuation")
property_tax_payable_account_id = fields.Many2one('account.account.template', string="Tax current account (payable)")
property_tax_receivable_account_id = fields.Many2one('account.account.template', string="Tax current account (receivable)")
property_advance_tax_payment_account_id = fields.Many2one('account.account.template', string="Advance tax payment account")
property_cash_basis_base_account_id = fields.Many2one(
comodel_name='account.account.template',
domain=[('deprecated', '=', False)],
string="Base Tax Received Account",
help="Account that will be set on lines created in cash basis journal entry and used to keep track of the "
"tax base amount.")
@api.model
def _prepare_transfer_account_template(self, prefix=None):
''' Prepare values to create the transfer account that is an intermediary account used when moving money
from a liquidity account to another.
:return: A dictionary of values to create a new account.account.
'''
digits = self.code_digits
prefix = prefix or self.transfer_account_code_prefix or ''
# Flatten the hierarchy of chart templates.
chart_template = self
chart_templates = self
while chart_template.parent_id:
chart_templates += chart_template.parent_id
chart_template = chart_template.parent_id
new_code = ''
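        # Probe candidate codes (the prefix padded with zeros plus 1..99)
        # until one is unused by any template in the flattened hierarchy;
        # the for/else raises if every candidate is already taken.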
for num in range(1, 100):
new_code = str(prefix.ljust(digits - 1, '0')) + str(num)
rec = self.env['account.account.template'].search(
[('code', '=', new_code), ('chart_template_id', 'in', chart_templates.ids)], limit=1)
if not rec:
break
else:
raise UserError(_('Cannot generate an unused account code.'))
current_assets_type = self.env.ref('account.data_account_type_current_assets', raise_if_not_found=False)
return {
'name': _('Liquidity Transfer'),
'code': new_code,
'user_type_id': current_assets_type and current_assets_type.id or False,
'reconcile': True,
'chart_template_id': self.id,
}
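    # Illustrative sketch (not part of the original module): the code generated above
    # pads the transfer prefix with zeros and appends a counter, e.g. with
    # code_digits = 6 and transfer_account_code_prefix = '1014' the candidates tried
    # are '101401', '101402', ... until an unused code is found.
    # Commented-out example, assuming the generic chart template xml id below exists:
    #
    #   template = env.ref('l10n_generic_coa.configurable_chart_template')
    #   vals = template._prepare_transfer_account_template()
    #   # vals['code'] -> e.g. '101401', vals['reconcile'] -> True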
@api.model
def _create_liquidity_journal_suspense_account(self, company, code_digits):
return self.env['account.account'].create({
'name': _("Bank Suspense Account"),
'code': self.env['account.account']._search_new_account_code(company, code_digits, company.bank_account_code_prefix or ''),
'user_type_id': self.env.ref('account.data_account_type_current_liabilities').id,
'company_id': company.id,
})
def try_loading(self, company=False, install_demo=True):
""" Installs this chart of accounts for the current company if not chart
of accounts had been created for it yet.
:param company (Model<res.company>): the company we try to load the chart template on.
If not provided, it is retrieved from the context.
:param install_demo (bool): whether or not we should load demo data right after loading the
chart template.
"""
# do not use `request.env` here, it can cause deadlocks
if not company:
if request and hasattr(request, 'allowed_company_ids'):
company = self.env['res.company'].browse(request.allowed_company_ids[0])
else:
company = self.env.company
# If we don't have any chart of account on this company, install this chart of account
if not company.chart_template_id and not self.existing_accounting(company):
for template in self:
template.with_context(default_company_id=company.id)._load(15.0, 15.0, company)
            # Install the demo data when the first localization is instantiated on the company
if install_demo and self.env.ref('base.module_account').demo:
self.with_context(
default_company_id=company.id,
allowed_company_ids=[company.id],
)._create_demo_data()
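    # Hedged usage sketch (not in the original source): try_loading() is typically
    # invoked when a localization module is installed. A minimal, commented-out
    # example, assuming the generic chart template xml id below exists:
    #
    #   chart = env.ref('l10n_generic_coa.configurable_chart_template')
    #   chart.try_loading(company=env.company, install_demo=False)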
def _create_demo_data(self):
try:
with self.env.cr.savepoint():
demo_data = self._get_demo_data()
for model, data in demo_data:
created = self.env[model]._load_records([{
'xml_id': "account.%s" % xml_id if '.' not in xml_id else xml_id,
'values': record,
'noupdate': True,
} for xml_id, record in data.items()])
self._post_create_demo_data(created)
except Exception:
# Do not rollback installation of CoA if demo data failed
_logger.exception('Error while loading accounting demo data')
def _load(self, sale_tax_rate, purchase_tax_rate, company):
""" Installs this chart of accounts on the current company, replacing
        the existing one if it already had one defined. If some accounting entries
had already been made, this function fails instead, triggering a UserError.
Also, note that this function can only be run by someone with administration
rights.
"""
self.ensure_one()
# do not use `request.env` here, it can cause deadlocks
# Ensure everything is translated to the company's language, not the user's one.
self = self.with_context(lang=company.partner_id.lang).with_company(company)
if not self.env.is_admin():
raise AccessError(_("Only administrators can load a chart of accounts"))
existing_accounts = self.env['account.account'].search([('company_id', '=', company.id)])
if existing_accounts:
# we tolerate switching from accounting package (localization module) as long as there isn't yet any accounting
# entries created for the company.
if self.existing_accounting(company):
raise UserError(_('Could not install new chart of account as there are already accounting entries existing.'))
# delete accounting properties
prop_values = ['account.account,%s' % (account_id,) for account_id in existing_accounts.ids]
existing_journals = self.env['account.journal'].search([('company_id', '=', company.id)])
if existing_journals:
prop_values.extend(['account.journal,%s' % (journal_id,) for journal_id in existing_journals.ids])
self.env['ir.property'].sudo().search(
[('value_reference', 'in', prop_values)]
).unlink()
# delete account, journal, tax, fiscal position and reconciliation model
models_to_delete = ['account.reconcile.model', 'account.fiscal.position', 'account.move.line', 'account.move', 'account.journal', 'account.tax', 'account.group']
for model in models_to_delete:
res = self.env[model].sudo().search([('company_id', '=', company.id)])
if len(res):
res.with_context(force_delete=True).unlink()
existing_accounts.unlink()
company.write({'currency_id': self.currency_id.id,
'anglo_saxon_accounting': self.use_anglo_saxon,
'bank_account_code_prefix': self.bank_account_code_prefix,
'cash_account_code_prefix': self.cash_account_code_prefix,
'transfer_account_code_prefix': self.transfer_account_code_prefix,
'chart_template_id': self.id
})
        # Set the CoA currency to active
self.currency_id.write({'active': True})
# When we install the CoA of first company, set the currency to price types and pricelists
if company.id == 1:
for reference in ['product.list_price', 'product.standard_price', 'product.list0']:
try:
                    self.env.ref(reference).write({'currency_id': self.currency_id.id})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(company.id, sale_tax_rate, purchase_tax_rate)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref = self._install_template(company, code_digits=self.code_digits)
# Set default cash difference account on company
if not company.account_journal_suspense_account_id:
company.account_journal_suspense_account_id = self._create_liquidity_journal_suspense_account(company, self.code_digits)
account_type_current_assets = self.env.ref('account.data_account_type_current_assets')
if not company.account_journal_payment_debit_account_id:
company.account_journal_payment_debit_account_id = self.env['account.account'].create({
'name': _("Outstanding Receipts"),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, company.bank_account_code_prefix or ''),
'reconcile': True,
'user_type_id': account_type_current_assets.id,
'company_id': company.id,
})
if not company.account_journal_payment_credit_account_id:
company.account_journal_payment_credit_account_id = self.env['account.account'].create({
'name': _("Outstanding Payments"),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, company.bank_account_code_prefix or ''),
'reconcile': True,
'user_type_id': account_type_current_assets.id,
'company_id': company.id,
})
if not company.default_cash_difference_expense_account_id:
company.default_cash_difference_expense_account_id = self.env['account.account'].create({
'name': _('Cash Difference Loss'),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, '999'),
'user_type_id': self.env.ref('account.data_account_type_expenses').id,
'tag_ids': [(6, 0, self.env.ref('account.account_tag_investing').ids)],
'company_id': company.id,
})
if not company.default_cash_difference_income_account_id:
company.default_cash_difference_income_account_id = self.env['account.account'].create({
'name': _('Cash Difference Gain'),
'code': self.env['account.account']._search_new_account_code(company, self.code_digits, '999'),
'user_type_id': self.env.ref('account.data_account_type_revenue').id,
'tag_ids': [(6, 0, self.env.ref('account.account_tag_investing').ids)],
'company_id': company.id,
})
# Set the transfer account on the company
company.transfer_account_id = self.env['account.account'].search([
('code', '=like', self.transfer_account_code_prefix + '%'), ('company_id', '=', company.id)], limit=1)
# Create Bank journals
self._create_bank_journals(company, acc_template_ref)
# Create the current year earning account if it wasn't present in the CoA
company.get_unaffected_earnings_account()
# set the default taxes on the company
company.account_sale_tax_id = self.env['account.tax'].search([('type_tax_use', 'in', ('sale', 'all')), ('company_id', '=', company.id)], limit=1).id
company.account_purchase_tax_id = self.env['account.tax'].search([('type_tax_use', 'in', ('purchase', 'all')), ('company_id', '=', company.id)], limit=1).id
if self.country_id:
# If this CoA is made for only one country, set it as the fiscal country of the company.
company.account_fiscal_country_id = self.country_id
return {}
@api.model
def existing_accounting(self, company_id):
""" Returns True iff some accounting entries have already been made for
        the provided company (meaning that its chart of accounts cannot
be changed anymore).
"""
model_to_check = ['account.payment', 'account.bank.statement']
for model in model_to_check:
if self.env[model].sudo().search([('company_id', '=', company_id.id)], limit=1):
return True
if self.env['account.move'].sudo().search([('company_id', '=', company_id.id), ('state', '!=', 'draft')], limit=1):
return True
return False
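    # Illustrative, commented-out usage (assumption: `company` is a res.company
    # record): existing_accounting() is the guard used by both try_loading() and
    # _load() before a chart of accounts may be replaced.
    #
    #   if env['account.chart.template'].existing_accounting(company):
    #       raise UserError("Entries already exist; the CoA cannot be changed.")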
def _create_tax_templates_from_rates(self, company_id, sale_tax_rate, purchase_tax_rate):
'''
This function checks if this chart template is configured as containing a full set of taxes, and if
        it's not the case, it creates the templates for the account.tax object according to the provided sale/purchase rates.
Then it saves the new tax templates as default taxes to use for this chart template.
:param company_id: id of the company for which the wizard is running
:param sale_tax_rate: the rate to use for created sales tax
:param purchase_tax_rate: the rate to use for created purchase tax
:return: True
'''
self.ensure_one()
obj_tax_temp = self.env['account.tax.template']
all_parents = self._get_chart_parent_ids()
# create tax templates from purchase_tax_rate and sale_tax_rate fields
if not self.complete_tax_set:
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'sale'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': sale_tax_rate, 'name': _('Tax %.2f%%') % sale_tax_rate, 'description': '%.2f%%' % sale_tax_rate})
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'purchase'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': purchase_tax_rate, 'name': _('Tax %.2f%%') % purchase_tax_rate, 'description': '%.2f%%' % purchase_tax_rate})
return True
def _get_chart_parent_ids(self):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
        :return: the IDs of all ancestor charts, including the chart itself.
"""
chart_template = self
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
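    # Example of the returned ordering (hypothetical ids): for a chart template
    # with id 3 whose parent_id is the template with id 1, _get_chart_parent_ids()
    # returns [3, 1], i.e. the chart itself first, then its ancestors up to the root.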
def _create_bank_journals(self, company, acc_template_ref):
'''
        This function creates bank journals and their accounts for each line of
        data returned by the function _get_default_bank_journals_data.
:param company: the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
'''
self.ensure_one()
bank_journals = self.env['account.journal']
# Create the journals that will trigger the account.account creation
for acc in self._get_default_bank_journals_data():
bank_journals += self.env['account.journal'].create({
'name': acc['acc_name'],
'type': acc['account_type'],
'company_id': company.id,
'currency_id': acc.get('currency_id', self.env['res.currency']).id,
'sequence': 10,
})
return bank_journals
@api.model
def _get_default_bank_journals_data(self):
""" Returns the data needed to create the default bank journals when
installing this chart of accounts, in the form of a list of dictionaries.
The allowed keys in these dictionaries are:
- acc_name: string (mandatory)
- account_type: 'cash' or 'bank' (mandatory)
- currency_id (optional, only to be specified if != company.currency_id)
"""
return [{'acc_name': _('Cash'), 'account_type': 'cash'}, {'acc_name': _('Bank'), 'account_type': 'bank'}]
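    # Sketch of a localization override (hypothetical module code, commented out so
    # the file stays importable): a chart template wanting an extra foreign-currency
    # bank journal could extend the returned list, e.g.:
    #
    #   def _get_default_bank_journals_data(self):
    #       res = super()._get_default_bank_journals_data()
    #       res.append({'acc_name': _('Bank USD'), 'account_type': 'bank',
    #                   'currency_id': self.env.ref('base.USD')})
    #       return res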
def open_select_template_wizard(self):
# Add action to open wizard to select between several templates
if not self.company_id.chart_template_id:
todo = self.env['ir.actions.todo']
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_wizard_multi_chart')
if action_rec:
todo.create({'action_id': action_rec.id, 'name': _('Choose Accounting Template')})
return True
@api.model
def _prepare_transfer_account_for_direct_creation(self, name, company):
""" Prepare values to create a transfer account directly, based on the
method _prepare_transfer_account_template().
This is needed when dealing with installation of payment modules
that requires the creation of their own transfer account.
:param name: The transfer account name.
:param company: The company owning this account.
:return: A dictionary of values to create a new account.account.
"""
vals = self._prepare_transfer_account_template()
digits = self.code_digits or 6
prefix = self.transfer_account_code_prefix or ''
vals.update({
'code': self.env['account.account']._search_new_account_code(company, digits, prefix),
'name': name,
'company_id': company.id,
})
        del vals['chart_template_id']
return vals
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
"""
This method is used for creating journals.
:param acc_template_ref: Account templates reference.
        :param company: the company to generate journals for.
:returns: True
"""
JournalObj = self.env['account.journal']
for vals_journal in self._prepare_all_journals(acc_template_ref, company, journals_dict=journals_dict):
journal = JournalObj.create(vals_journal)
if vals_journal['type'] == 'general' and vals_journal['code'] == _('EXCH'):
company.write({'currency_exchange_journal_id': journal.id})
if vals_journal['type'] == 'general' and vals_journal['code'] == _('CABA'):
company.write({'tax_cash_basis_journal_id': journal.id})
return True
def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):
def _get_default_account(journal_vals, type='debit'):
# Get the default accounts
default_account = False
if journal['type'] == 'sale':
default_account = acc_template_ref.get(self.property_account_income_categ_id.id)
elif journal['type'] == 'purchase':
default_account = acc_template_ref.get(self.property_account_expense_categ_id.id)
return default_account
journals = [{'name': _('Customer Invoices'), 'type': 'sale', 'code': _('INV'), 'favorite': True, 'color': 11, 'sequence': 5},
{'name': _('Vendor Bills'), 'type': 'purchase', 'code': _('BILL'), 'favorite': True, 'color': 11, 'sequence': 6},
{'name': _('Miscellaneous Operations'), 'type': 'general', 'code': _('MISC'), 'favorite': True, 'sequence': 7},
{'name': _('Exchange Difference'), 'type': 'general', 'code': _('EXCH'), 'favorite': False, 'sequence': 9},
{'name': _('Cash Basis Taxes'), 'type': 'general', 'code': _('CABA'), 'favorite': False, 'sequence': 10}]
        if journals_dict is not None:
journals.extend(journals_dict)
self.ensure_one()
journal_data = []
for journal in journals:
vals = {
'type': journal['type'],
'name': journal['name'],
'code': journal['code'],
'company_id': company.id,
'default_account_id': _get_default_account(journal),
'show_on_dashboard': journal['favorite'],
'color': journal.get('color', False),
'sequence': journal['sequence']
}
journal_data.append(vals)
return journal_data
def generate_properties(self, acc_template_ref, company):
"""
        This method is used for creating properties.
        :param acc_template_ref: Mapping between ids of account templates and real accounts created from them
        :param company: the company to generate properties for.
:returns: True
"""
self.ensure_one()
PropertyObj = self.env['ir.property']
todo_list = [
('property_account_receivable_id', 'res.partner'),
('property_account_payable_id', 'res.partner'),
('property_account_expense_categ_id', 'product.category'),
('property_account_income_categ_id', 'product.category'),
('property_account_expense_id', 'product.template'),
('property_account_income_id', 'product.template'),
('property_tax_payable_account_id', 'account.tax.group'),
('property_tax_receivable_account_id', 'account.tax.group'),
('property_advance_tax_payment_account_id', 'account.tax.group'),
]
for field, model in todo_list:
account = self[field]
value = acc_template_ref[account.id] if account else False
if value:
PropertyObj._set_default(field, model, value, company=company)
stock_properties = [
'property_stock_account_input_categ_id',
'property_stock_account_output_categ_id',
'property_stock_valuation_account_id',
]
for stock_property in stock_properties:
account = getattr(self, stock_property)
value = account and acc_template_ref[account.id] or False
if value:
company.write({stock_property: value})
return True
def _install_template(self, company, code_digits=None, obj_wizard=None, acc_ref=None, taxes_ref=None):
""" Recursively load the template objects and create the real objects from them.
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
        :returns: tuple of two dictionaries:
            * the mapping between the account template ids and the ids of the real accounts that have been generated
              from them, as first item,
            * a similar dictionary mapping the tax templates to the taxes, as second item,
        :rtype: tuple(dict, dict)
"""
self.ensure_one()
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if self.parent_id:
tmp1, tmp2 = self.parent_id._install_template(company, code_digits=code_digits, acc_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
# Ensure, even if individually, that everything is translated according to the company's language.
tmp1, tmp2 = self.with_context(lang=company.partner_id.lang)._load_template(company, code_digits=code_digits, account_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
return acc_ref, taxes_ref
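    # Note on the recursion above (descriptive comment, not in the original source):
    # parent templates are installed first, so a child localization only creates the
    # records it adds or overrides; acc_ref and taxes_ref accumulate across the chain.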
def _load_template(self, company, code_digits=None, account_ref=None, taxes_ref=None):
""" Generate all the objects from the templates
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
        :param account_ref: Mapping between ids of account templates and real accounts created from them
        :param taxes_ref: Mapping between ids of tax templates and real taxes created from them
        :returns: tuple of two dictionaries:
            * the mapping between the account template ids and the ids of the real accounts that have been generated
              from them, as first item,
            * a similar dictionary mapping the tax templates to the taxes, as second item,
        :rtype: tuple(dict, dict)
"""
self.ensure_one()
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if not code_digits:
code_digits = self.code_digits
AccountTaxObj = self.env['account.tax']
# Generate taxes from templates.
generated_tax_res = self.with_context(active_test=False).tax_template_ids._generate_tax(company)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = self.generate_account(taxes_ref, account_ref, code_digits, company)
account_ref.update(account_template_ref)
# Generate account groups, from template
self.generate_account_groups(company)
# writing account values after creation of accounts
for key, value in generated_tax_res['account_dict']['account.tax'].items():
if value['cash_basis_transition_account_id']:
AccountTaxObj.browse(key).write({
'cash_basis_transition_account_id': account_ref.get(value['cash_basis_transition_account_id'], False),
})
AccountTaxRepartitionLineObj = self.env['account.tax.repartition.line']
for key, value in generated_tax_res['account_dict']['account.tax.repartition.line'].items():
if value['account_id']:
AccountTaxRepartitionLineObj.browse(key).write({
'account_id': account_ref.get(value['account_id']),
})
# Set the company accounts
self._load_company_accounts(account_ref, company)
# Create Journals - Only done for root chart template
if not self.parent_id:
self.generate_journals(account_ref, company)
# generate properties function
self.generate_properties(account_ref, company)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
self.generate_fiscal_position(taxes_ref, account_ref, company)
# Generate account operation template templates
self.generate_account_reconcile_model(taxes_ref, account_ref, company)
return account_ref, taxes_ref
def _load_company_accounts(self, account_ref, company):
# Set the default accounts on the company
accounts = {
'default_cash_difference_income_account_id': self.default_cash_difference_income_account_id.id,
'default_cash_difference_expense_account_id': self.default_cash_difference_expense_account_id.id,
'account_journal_suspense_account_id': self.account_journal_suspense_account_id.id,
'account_journal_payment_debit_account_id': self.account_journal_payment_debit_account_id.id,
'account_journal_payment_credit_account_id': self.account_journal_payment_credit_account_id.id,
'account_cash_basis_base_account_id': self.property_cash_basis_base_account_id.id,
'account_default_pos_receivable_account_id': self.default_pos_receivable_account_id.id,
'income_currency_exchange_account_id': self.income_currency_exchange_account_id.id,
'expense_currency_exchange_account_id': self.expense_currency_exchange_account_id.id,
}
values = {}
# The loop is to avoid writing when we have no values, thus avoiding erasing the account from the parent
for key, account in accounts.items():
if account_ref.get(account):
values[key] = account_ref.get(account)
company.write(values)
def create_record_with_xmlid(self, company, template, model, vals):
return self._create_records_with_xmlid(model, [(template, vals)], company).id
def _create_records_with_xmlid(self, model, template_vals, company):
""" Create records for the given model name with the given vals, and
create xml ids based on each record's template and company id.
"""
if not template_vals:
return self.env[model]
template_model = template_vals[0][0]
template_ids = [template.id for template, vals in template_vals]
template_xmlids = template_model.browse(template_ids).get_external_id()
data_list = []
for template, vals in template_vals:
module, name = template_xmlids[template.id].split('.', 1)
xml_id = "%s.%s_%s" % (module, company.id, name)
data_list.append(dict(xml_id=xml_id, values=vals, noupdate=True))
return self.env[model]._load_records(data_list)
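    # Example of the xml id scheme built above (hypothetical names): a template with
    # external id 'l10n_xx.account_template_4000' instantiated for company id 7
    # produces the record xml id 'l10n_xx.7_account_template_4000'.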
@api.model
def _load_records(self, data_list, update=False):
        # When creating a chart template, also create, for the liquidity transfer account:
        # - an account.account.template: this allows defining account.reconcile.model.template objects referring to that liquidity
        #   transfer account even though it does not exist in any xml file
        # - an entry in ir_model_data: this allows still using the method create_record_with_xmlid() and makes no difference
        #   between regular accounts created and that liquidity transfer account
records = super(AccountChartTemplate, self)._load_records(data_list, update)
account_data_list = []
for data, record in zip(data_list, records):
# Create the transfer account only for leaf chart template in the hierarchy.
if record.parent_id:
continue
if data.get('xml_id'):
account_xml_id = data['xml_id'] + '_liquidity_transfer'
if not self.env.ref(account_xml_id, raise_if_not_found=False):
account_vals = record._prepare_transfer_account_template()
account_data_list.append(dict(
xml_id=account_xml_id,
values=account_vals,
noupdate=data.get('noupdate'),
))
self.env['account.account.template']._load_records(account_data_list, update)
return records
def _get_account_vals(self, company, account_template, code_acc, tax_template_ref):
""" This method generates a dictionary of all the values for the account that will be created.
"""
self.ensure_one()
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
val = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'user_type_id': account_template.user_type_id and account_template.user_type_id.id or False,
'reconcile': account_template.reconcile,
'note': account_template.note,
'tax_ids': [(6, 0, tax_ids)],
'company_id': company.id,
'tag_ids': [(6, 0, [t.id for t in account_template.tag_ids])],
}
return val
def generate_account(self, tax_template_ref, acc_template_ref, code_digits, company):
""" This method generates accounts from account templates.
        :param tax_template_ref: Taxes templates reference for writing taxes_id in account.account.
        :param acc_template_ref: dictionary containing the mapping between the account templates and generated accounts (will be populated)
        :param code_digits: number of digits to use for account code.
        :param company: the company to generate accounts for.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_tmpl_obj = self.env['account.account.template']
acc_template = account_tmpl_obj.search([('nocreate', '!=', True), ('chart_template_id', '=', self.id)], order='id')
template_vals = []
for account_template in acc_template:
            code_main = len(account_template.code) if account_template.code else 0
            code_acc = account_template.code or ''
            if 0 < code_main <= code_digits:
                code_acc = code_acc + '0' * (code_digits - code_main)
vals = self._get_account_vals(company, account_template, code_acc, tax_template_ref)
template_vals.append((account_template, vals))
accounts = self._create_records_with_xmlid('account.account', template_vals, company)
for template, account in zip(acc_template, accounts):
acc_template_ref[template.id] = account.id
return acc_template_ref
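    # Worked example of the code padding above (hypothetical values): with
    # code_digits = 6, a template code '4000' is padded to the account code
    # '400000'; a template code longer than code_digits is kept unchanged.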
def generate_account_groups(self, company):
""" This method generates account groups from account groups templates.
:param company: company to generate the account groups for
"""
self.ensure_one()
group_templates = self.env['account.group.template'].search([('chart_template_id', '=', self.id)])
template_vals = []
for group_template in group_templates:
vals = {
'name': group_template.name,
'code_prefix_start': group_template.code_prefix_start,
'code_prefix_end': group_template.code_prefix_end,
'company_id': company.id,
}
template_vals.append((group_template, vals))
groups = self._create_records_with_xmlid('account.group', template_vals, company)
def _prepare_reconcile_model_vals(self, company, account_reconcile_model, acc_template_ref, tax_template_ref):
""" This method generates a dictionary of all the values for the account.reconcile.model that will be created.
"""
self.ensure_one()
account_reconcile_model_lines = self.env['account.reconcile.model.line.template'].search([
('model_id', '=', account_reconcile_model.id)
])
return {
'name': account_reconcile_model.name,
'sequence': account_reconcile_model.sequence,
'company_id': company.id,
'rule_type': account_reconcile_model.rule_type,
'auto_reconcile': account_reconcile_model.auto_reconcile,
'to_check': account_reconcile_model.to_check,
'match_journal_ids': [(6, None, account_reconcile_model.match_journal_ids.ids)],
'match_nature': account_reconcile_model.match_nature,
'match_amount': account_reconcile_model.match_amount,
'match_amount_min': account_reconcile_model.match_amount_min,
'match_amount_max': account_reconcile_model.match_amount_max,
'match_label': account_reconcile_model.match_label,
'match_label_param': account_reconcile_model.match_label_param,
'match_note': account_reconcile_model.match_note,
'match_note_param': account_reconcile_model.match_note_param,
'match_transaction_type': account_reconcile_model.match_transaction_type,
'match_transaction_type_param': account_reconcile_model.match_transaction_type_param,
'match_same_currency': account_reconcile_model.match_same_currency,
'match_total_amount': account_reconcile_model.match_total_amount,
'match_total_amount_param': account_reconcile_model.match_total_amount_param,
'match_partner': account_reconcile_model.match_partner,
'match_partner_ids': [(6, None, account_reconcile_model.match_partner_ids.ids)],
'match_partner_category_ids': [(6, None, account_reconcile_model.match_partner_category_ids.ids)],
'line_ids': [(0, 0, {
'account_id': acc_template_ref[line.account_id.id],
'label': line.label,
'amount_type': line.amount_type,
'force_tax_included': line.force_tax_included,
'amount_string': line.amount_string,
'tax_ids': [[4, tax_template_ref[tax.id], 0] for tax in line.tax_ids],
}) for line in account_reconcile_model_lines],
}
def generate_account_reconcile_model(self, tax_template_ref, acc_template_ref, company):
""" This method creates account reconcile models
        :param tax_template_ref: Taxes templates reference for writing taxes_id in account.account.
        :param acc_template_ref: dictionary with the mapping between the account templates and the real accounts.
        :param company: the company to create models for.
:returns: return new_account_reconcile_model for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_reconcile_models = self.env['account.reconcile.model.template'].search([
('chart_template_id', '=', self.id)
])
for account_reconcile_model in account_reconcile_models:
vals = self._prepare_reconcile_model_vals(company, account_reconcile_model, acc_template_ref, tax_template_ref)
self.create_record_with_xmlid(company, account_reconcile_model, 'account.reconcile.model', vals)
# Create a default rule for the reconciliation widget matching invoices automatically.
self.env['account.reconcile.model'].sudo().create({
"name": _('Invoices Matching Rule'),
"sequence": '1',
"rule_type": 'invoice_matching',
"auto_reconcile": False,
"match_nature": 'both',
"match_same_currency": True,
"match_total_amount": True,
"match_total_amount_param": 100,
"match_partner": True,
"company_id": company.id,
})
return True
def _get_fp_vals(self, company, position):
return {
'company_id': company.id,
'sequence': position.sequence,
'name': position.name,
'note': position.note,
'auto_apply': position.auto_apply,
'vat_required': position.vat_required,
'country_id': position.country_id.id,
'country_group_id': position.country_group_id.id,
'state_ids': position.state_ids and [(6,0, position.state_ids.ids)] or [],
'zip_from': position.zip_from,
'zip_to': position.zip_to,
}
def generate_fiscal_position(self, tax_template_ref, acc_template_ref, company):
""" This method generates Fiscal Position, Fiscal Position Accounts
and Fiscal Position Taxes from templates.
        :param tax_template_ref: Taxes templates reference for generating account.fiscal.position.tax.
        :param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
        :param company: the company to generate fiscal position data for
:returns: True
"""
self.ensure_one()
positions = self.env['account.fiscal.position.template'].search([('chart_template_id', '=', self.id)])
# first create fiscal positions in batch
template_vals = []
for position in positions:
fp_vals = self._get_fp_vals(company, position)
template_vals.append((position, fp_vals))
fps = self._create_records_with_xmlid('account.fiscal.position', template_vals, company)
# then create fiscal position taxes and accounts
tax_template_vals = []
account_template_vals = []
for position, fp in zip(positions, fps):
for tax in position.tax_ids:
tax_template_vals.append((tax, {
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': fp.id,
}))
for acc in position.account_ids:
account_template_vals.append((acc, {
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': fp.id,
}))
self._create_records_with_xmlid('account.fiscal.position.tax', tax_template_vals, company)
self._create_records_with_xmlid('account.fiscal.position.account', account_template_vals, company)
return True
class AccountTaxTemplate(models.Model):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_order = 'id'
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Tax Name', required=True)
type_tax_use = fields.Selection(TYPE_TAX_USE, string='Tax Type', required=True, default="sale",
help="Determines where the tax is selectable. Note : 'None' means a tax can't be used by itself, however it can still be used in a group.")
tax_scope = fields.Selection([('service', 'Service'), ('consu', 'Consumable')], help="Restrict the use of taxes to a type of product.")
amount_type = fields.Selection(default='percent', string="Tax Computation", required=True,
selection=[('group', 'Group of Taxes'), ('fixed', 'Fixed'), ('percent', 'Percentage of Price'), ('division', 'Percentage of Price Tax Included')])
active = fields.Boolean(default=True, help="Set active to false to hide the tax without removing it.")
children_tax_ids = fields.Many2many('account.tax.template', 'account_tax_template_filiation_rel', 'parent_tax', 'child_tax', string='Children Taxes')
sequence = fields.Integer(required=True, default=1,
help="The sequence field is used to define order in which the tax lines are applied.")
amount = fields.Float(required=True, digits=(16, 4), default=0)
description = fields.Char(string='Display on Invoices')
price_include = fields.Boolean(string='Included in Price', default=False,
help="Check this if the price you use on the product and invoices includes this tax.")
include_base_amount = fields.Boolean(string='Affect Subsequent Taxes', default=False,
help="If set, taxes with a higher sequence than this one will be affected by it, provided they accept it.")
is_base_affected = fields.Boolean(
string="Base Affected by Previous Taxes",
default=True,
help="If set, taxes with a lower sequence might affect this one, provided they try to do it.")
analytic = fields.Boolean(string="Analytic Cost", help="If set, the amount computed by this tax will be assigned to the same analytic account as the invoice line (if any)")
invoice_repartition_line_ids = fields.One2many(string="Repartition for Invoices", comodel_name="account.tax.repartition.line.template", inverse_name="invoice_tax_id", copy=True, help="Repartition when the tax is used on an invoice")
refund_repartition_line_ids = fields.One2many(string="Repartition for Refund Invoices", comodel_name="account.tax.repartition.line.template", inverse_name="refund_tax_id", copy=True, help="Repartition when the tax is used on a refund")
tax_group_id = fields.Many2one('account.tax.group', string="Tax Group")
tax_exigibility = fields.Selection(
[('on_invoice', 'Based on Invoice'),
('on_payment', 'Based on Payment'),
], string='Tax Due', default='on_invoice',
help="Based on Invoice: the tax is due as soon as the invoice is validated.\n"
"Based on Payment: the tax is due as soon as the payment of the invoice is received.")
cash_basis_transition_account_id = fields.Many2one(
comodel_name='account.account.template',
string="Cash Basis Transition Account",
domain=[('deprecated', '=', False)],
help="Account used to transition the tax amount for cash basis taxes. It will contain the tax amount as long as the original invoice has not been reconciled ; at reconciliation, this amount cancelled on this account and put on the regular tax account.")
_sql_constraints = [
('name_company_uniq', 'unique(name, type_tax_use, tax_scope, chart_template_id)', 'Tax names must be unique !'),
]
@api.depends('name', 'description')
def name_get(self):
res = []
for record in self:
name = record.description and record.description or record.name
res.append((record.id, name))
return res
def _get_tax_vals(self, company, tax_template_to_tax):
""" This method generates a dictionary of all the values for the tax that will be created.
"""
# Compute children tax ids
children_ids = []
for child_tax in self.children_tax_ids:
if tax_template_to_tax.get(child_tax.id):
children_ids.append(tax_template_to_tax[child_tax.id])
self.ensure_one()
val = {
'name': self.name,
'type_tax_use': self.type_tax_use,
'tax_scope': self.tax_scope,
'amount_type': self.amount_type,
'active': self.active,
'company_id': company.id,
'sequence': self.sequence,
'amount': self.amount,
'description': self.description,
'price_include': self.price_include,
'include_base_amount': self.include_base_amount,
'is_base_affected': self.is_base_affected,
'analytic': self.analytic,
'children_tax_ids': [(6, 0, children_ids)],
'tax_exigibility': self.tax_exigibility,
}
# We add repartition lines if there are some, so that if there are none,
# default_get is called and creates the default ones properly.
if self.invoice_repartition_line_ids:
val['invoice_repartition_line_ids'] = self.invoice_repartition_line_ids.get_repartition_line_create_vals(company)
if self.refund_repartition_line_ids:
val['refund_repartition_line_ids'] = self.refund_repartition_line_ids.get_repartition_line_create_vals(company)
if self.tax_group_id:
val['tax_group_id'] = self.tax_group_id.id
return val
def _generate_tax(self, company):
""" This method generate taxes from templates.
:param company: the company for which the taxes should be created from templates in self
:returns: {
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
# default_company_id is needed in context to allow creation of default
# repartition lines on taxes
ChartTemplate = self.env['account.chart.template'].with_context(default_company_id=company.id)
todo_dict = {'account.tax': {}, 'account.tax.repartition.line': {}}
tax_template_to_tax = {}
templates_todo = list(self)
while templates_todo:
templates = templates_todo
templates_todo = []
# create taxes in batch
tax_template_vals = []
for template in templates:
if all(child.id in tax_template_to_tax for child in template.children_tax_ids):
vals = template._get_tax_vals(company, tax_template_to_tax)
if self.chart_template_id.country_id:
vals['country_id'] = self.chart_template_id.country_id.id
elif company.account_fiscal_country_id:
vals['country_id'] = company.account_fiscal_country_id.id
else:
# Will happen for generic CoAs such as syscohada (they are available for multiple countries, and don't have any country_id)
raise UserError(_("Please first define a fiscal country for company %s.", company.name))
tax_template_vals.append((template, vals))
else:
# defer the creation of this tax to the next batch
templates_todo.append(template)
taxes = ChartTemplate._create_records_with_xmlid('account.tax', tax_template_vals, company)
# fill in tax_template_to_tax and todo_dict
for tax, (template, vals) in zip(taxes, tax_template_vals):
tax_template_to_tax[template.id] = tax.id
# Since the accounts have not been created yet, we have to wait before filling these fields
todo_dict['account.tax'][tax.id] = {
'cash_basis_transition_account_id': template.cash_basis_transition_account_id.id,
}
# We also have to delay the assignation of accounts to repartition lines
# The below code assigns the account_id to the repartition lines according
# to the corresponding repartition line in the template, based on the order.
# As we just created the repartition lines, tax.invoice_repartition_line_ids is not well sorted.
                # But we can force the sort by calling sorted()
all_tax_rep_lines = tax.invoice_repartition_line_ids.sorted() + tax.refund_repartition_line_ids.sorted()
all_template_rep_lines = template.invoice_repartition_line_ids + template.refund_repartition_line_ids
for i in range(0, len(all_template_rep_lines)):
# We assume template and tax repartition lines are in the same order
template_account = all_template_rep_lines[i].account_id
if template_account:
todo_dict['account.tax.repartition.line'][all_tax_rep_lines[i].id] = {
'account_id': template_account.id,
}
if any(template.tax_exigibility == 'on_payment' for template in self):
# When a CoA is being installed automatically and if it is creating account tax(es) whose field `Use Cash Basis`(tax_exigibility) is set to True by default
# (example of such CoA's are l10n_fr and l10n_mx) then in the `Accounting Settings` the option `Cash Basis` should be checked by default.
company.tax_exigibility = True
return {
'tax_template_to_tax': tax_template_to_tax,
'account_dict': todo_dict
}
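    # Shape of the value returned by _generate_tax() (illustrative, hypothetical ids):
    #   {
    #       'tax_template_to_tax': {<template_id>: <tax_id>, ...},
    #       'account_dict': {
    #           'account.tax': {<tax_id>: {'cash_basis_transition_account_id': <account_template_id>}},
    #           'account.tax.repartition.line': {<rep_line_id>: {'account_id': <account_template_id>}},
    #       },
    #   }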
# Tax Repartition Line Template
class AccountTaxRepartitionLineTemplate(models.Model):
_name = "account.tax.repartition.line.template"
_description = "Tax Repartition Line Template"
factor_percent = fields.Float(string="%", required=True, help="Factor to apply on the account move lines generated from this distribution line, in percents")
repartition_type = fields.Selection(string="Based On", selection=[('base', 'Base'), ('tax', 'of tax')], required=True, default='tax', help="Base on which the factor will be applied.")
account_id = fields.Many2one(string="Account", comodel_name='account.account.template', help="Account on which to post the tax amount")
invoice_tax_id = fields.Many2one(comodel_name='account.tax.template', help="The tax set to apply this distribution on invoices. Mutually exclusive with refund_tax_id")
refund_tax_id = fields.Many2one(comodel_name='account.tax.template', help="The tax set to apply this distribution on refund invoices. Mutually exclusive with invoice_tax_id")
tag_ids = fields.Many2many(string="Financial Tags", relation='account_tax_repartition_financial_tags', comodel_name='account.account.tag', copy=True, help="Additional tags that will be assigned by this repartition line for use in financial reports")
use_in_tax_closing = fields.Boolean(string="Tax Closing Entry")
# These last two fields are helpers used to ease the declaration of account.account.tag objects in XML.
# They are directly linked to account.tax.report.line objects, which create corresponding + and - tags
# at creation. This way, we avoid declaring + and - separately every time.
plus_report_line_ids = fields.Many2many(string="Plus Tax Report Lines", relation='account_tax_repartition_plus_report_line', comodel_name='account.tax.report.line', copy=True, help="Tax report lines whose '+' tag will be assigned to move lines by this repartition line")
minus_report_line_ids = fields.Many2many(string="Minus Report Lines", relation='account_tax_repartition_minus_report_line', comodel_name='account.tax.report.line', copy=True, help="Tax report lines whose '-' tag will be assigned to move lines by this repartition line")
@api.model
def create(self, vals):
if vals.get('plus_report_line_ids'):
vals['plus_report_line_ids'] = self._convert_tag_syntax_to_orm(vals['plus_report_line_ids'])
if vals.get('minus_report_line_ids'):
vals['minus_report_line_ids'] = self._convert_tag_syntax_to_orm(vals['minus_report_line_ids'])
if vals.get('tag_ids'):
vals['tag_ids'] = self._convert_tag_syntax_to_orm(vals['tag_ids'])
if vals.get('use_in_tax_closing') is None:
if not vals.get('account_id'):
vals['use_in_tax_closing'] = False
else:
                internal_group = self.env['account.account.template'].browse(vals.get('account_id')).user_type_id.internal_group
                vals['use_in_tax_closing'] = internal_group not in ('income', 'expense')
return super(AccountTaxRepartitionLineTemplate, self).create(vals)
@api.model
def _convert_tag_syntax_to_orm(self, tags_list):
""" Repartition lines give the possibility to directly give
a list of ids to create for tags instead of a list of ORM commands.
This function checks that tags_list uses this syntactic sugar and returns
an ORM-compliant version of it if it does.
"""
if tags_list and all(isinstance(elem, int) for elem in tags_list):
return [(6, False, tags_list)]
return tags_list
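    # Example of the syntactic sugar handled above: a plain list of tag ids such as
    # [3, 5, 8] is converted to the ORM command [(6, False, [3, 5, 8])]; a list that
    # already contains ORM commands is returned unchanged.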
@api.constrains('invoice_tax_id', 'refund_tax_id')
def validate_tax_template_link(self):
for record in self:
if record.invoice_tax_id and record.refund_tax_id:
raise ValidationError(_("Tax distribution line templates should apply to either invoices or refunds, not both at the same time. invoice_tax_id and refund_tax_id should not be set together."))
@api.constrains('plus_report_line_ids', 'minus_report_line_ids')
def validate_tags(self):
all_tax_rep_lines = self.mapped('plus_report_line_ids') + self.mapped('minus_report_line_ids')
lines_without_tag = all_tax_rep_lines.filtered(lambda x: not x.tag_name)
if lines_without_tag:
raise ValidationError(_("The following tax report lines are used in some tax distribution template though they don't generate any tag: %s . This probably means you forgot to set a tag_name on these lines.", str(lines_without_tag.mapped('name'))))
def get_repartition_line_create_vals(self, company):
rslt = [(5, 0, 0)]
for record in self:
tags_to_add = self.env['account.account.tag']
tags_to_add += record.plus_report_line_ids.mapped('tag_ids').filtered(lambda x: not x.tax_negate)
tags_to_add += record.minus_report_line_ids.mapped('tag_ids').filtered(lambda x: x.tax_negate)
tags_to_add += record.tag_ids
rslt.append((0, 0, {
'factor_percent': record.factor_percent,
'repartition_type': record.repartition_type,
'tag_ids': [(6, 0, tags_to_add.ids)],
'company_id': company.id,
'use_in_tax_closing': record.use_in_tax_closing
}))
return rslt
# Fiscal Position Templates
class AccountFiscalPositionTemplate(models.Model):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
sequence = fields.Integer()
name = fields.Char(string='Fiscal Position Template', required=True)
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
account_ids = fields.One2many('account.fiscal.position.account.template', 'position_id', string='Account Mapping')
tax_ids = fields.One2many('account.fiscal.position.tax.template', 'position_id', string='Tax Mapping')
note = fields.Text(string='Notes')
auto_apply = fields.Boolean(string='Detect Automatically', help="Apply automatically this fiscal position.")
vat_required = fields.Boolean(string='VAT required', help="Apply only if partner has a VAT number.")
country_id = fields.Many2one('res.country', string='Country',
help="Apply only if delivery country matches.")
country_group_id = fields.Many2one('res.country.group', string='Country Group',
help="Apply only if delivery country matches the group.")
state_ids = fields.Many2many('res.country.state', string='Federal States')
zip_from = fields.Char(string='Zip Range From')
zip_to = fields.Char(string='Zip Range To')
class AccountFiscalPositionTaxTemplate(models.Model):
_name = 'account.fiscal.position.tax.template'
_description = 'Tax Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Position', required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax.template', string='Tax Source', required=True)
tax_dest_id = fields.Many2one('account.tax.template', string='Replacement Tax')
class AccountFiscalPositionAccountTemplate(models.Model):
_name = 'account.fiscal.position.account.template'
_description = 'Accounts Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Mapping', required=True, ondelete='cascade')
account_src_id = fields.Many2one('account.account.template', string='Account Source', required=True)
account_dest_id = fields.Many2one('account.account.template', string='Account Destination', required=True)
class AccountReconcileModelTemplate(models.Model):
_name = "account.reconcile.model.template"
_description = 'Reconcile Model Template'
# Base fields.
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Button Label', required=True)
sequence = fields.Integer(required=True, default=10)
rule_type = fields.Selection(selection=[
('writeoff_button', 'Manually create a write-off on clicked button'),
('writeoff_suggestion', 'Suggest a write-off'),
('invoice_matching', 'Match existing invoices/bills')
], string='Type', default='writeoff_button', required=True)
auto_reconcile = fields.Boolean(string='Auto-validate',
help='Validate the statement line automatically (reconciliation based on your rule).')
to_check = fields.Boolean(string='To Check', default=False, help='This matching rule is used when the user is not certain of all the information of the counterpart.')
matching_order = fields.Selection(
selection=[
('old_first', 'Oldest first'),
('new_first', 'Newest first'),
]
)
# ===== Conditions =====
match_text_location_label = fields.Boolean(
default=True,
help="Search in the Statement's Label to find the Invoice/Payment's reference",
)
match_text_location_note = fields.Boolean(
default=False,
help="Search in the Statement's Note to find the Invoice/Payment's reference",
)
match_text_location_reference = fields.Boolean(
default=False,
help="Search in the Statement's Reference to find the Invoice/Payment's reference",
)
match_journal_ids = fields.Many2many('account.journal', string='Journals',
domain="[('type', 'in', ('bank', 'cash'))]",
help='The reconciliation model will only be available from the selected journals.')
match_nature = fields.Selection(selection=[
('amount_received', 'Amount Received'),
('amount_paid', 'Amount Paid'),
('both', 'Amount Paid/Received')
], string='Amount Nature', required=True, default='both',
help='''The reconciliation model will only be applied to the selected transaction type:
* Amount Received: Only applied when receiving an amount.
* Amount Paid: Only applied when paying an amount.
* Amount Paid/Received: Applied in both cases.''')
match_amount = fields.Selection(selection=[
('lower', 'Is Lower Than'),
('greater', 'Is Greater Than'),
('between', 'Is Between'),
], string='Amount',
        help='The reconciliation model will only be applied when the amount is lower than, greater than or between the specified amount(s).')
match_amount_min = fields.Float(string='Amount Min Parameter')
match_amount_max = fields.Float(string='Amount Max Parameter')
match_label = fields.Selection(selection=[
('contains', 'Contains'),
('not_contains', 'Not Contains'),
('match_regex', 'Match Regex'),
], string='Label', help='''The reconciliation model will only be applied when the label:
            * Contains: The proposition label must contain this string (case insensitive).
* Not Contains: Negation of "Contains".
* Match Regex: Define your own regular expression.''')
match_label_param = fields.Char(string='Label Parameter')
match_note = fields.Selection(selection=[
('contains', 'Contains'),
('not_contains', 'Not Contains'),
('match_regex', 'Match Regex'),
], string='Note', help='''The reconciliation model will only be applied when the note:
            * Contains: The proposition note must contain this string (case insensitive).
* Not Contains: Negation of "Contains".
* Match Regex: Define your own regular expression.''')
match_note_param = fields.Char(string='Note Parameter')
match_transaction_type = fields.Selection(selection=[
('contains', 'Contains'),
('not_contains', 'Not Contains'),
('match_regex', 'Match Regex'),
], string='Transaction Type', help='''The reconciliation model will only be applied when the transaction type:
            * Contains: The proposition transaction type must contain this string (case insensitive).
* Not Contains: Negation of "Contains".
* Match Regex: Define your own regular expression.''')
match_transaction_type_param = fields.Char(string='Transaction Type Parameter')
match_same_currency = fields.Boolean(string='Same Currency Matching', default=True,
help='Restrict to propositions having the same currency as the statement line.')
match_total_amount = fields.Boolean(string='Amount Matching', default=True,
help='The sum of total residual amount propositions matches the statement line amount.')
match_total_amount_param = fields.Float(string='Amount Matching %', default=100,
help='The sum of total residual amount propositions matches the statement line amount under this percentage.')
match_partner = fields.Boolean(string='Partner Is Set',
help='The reconciliation model will only be applied when a customer/vendor is set.')
match_partner_ids = fields.Many2many('res.partner', string='Restrict Partners to',
help='The reconciliation model will only be applied to the selected customers/vendors.')
match_partner_category_ids = fields.Many2many('res.partner.category', string='Restrict Partner Categories to',
help='The reconciliation model will only be applied to the selected customer/vendor categories.')
line_ids = fields.One2many('account.reconcile.model.line.template', 'model_id')
decimal_separator = fields.Char(help="Every character that is nor a digit nor this separator will be removed from the matching string")
class AccountReconcileModelLineTemplate(models.Model):
_name = "account.reconcile.model.line.template"
_description = 'Reconcile Model Line Template'
model_id = fields.Many2one('account.reconcile.model.template')
sequence = fields.Integer(required=True, default=10)
account_id = fields.Many2one('account.account.template', string='Account', ondelete='cascade', domain=[('deprecated', '=', False)])
label = fields.Char(string='Journal Item Label')
amount_type = fields.Selection([
('fixed', 'Fixed'),
('percentage', 'Percentage of balance'),
('regex', 'From label'),
], required=True, default='percentage')
amount_string = fields.Char(string="Amount")
force_tax_included = fields.Boolean(string='Tax Included in Price', help='Force the tax to be managed as a price included tax.')
tax_ids = fields.Many2many('account.tax.template', string='Taxes', ondelete='restrict')
|
gpl-3.0
| -6,534,800,096,511,794,000
| 55.595583
| 274
| 0.642282
| false
| 4.084314
| false
| false
| false
|
Brocade-OpenSource/OpenStack-DNRM-Neutron
|
neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py
|
1
|
2121
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ext_gw_mode
Revision ID: 128e042a2b68
Revises: 32b517556ec9
Create Date: 2013-03-27 00:35:17.323280
"""
# revision identifiers, used by Alembic.
revision = '128e042a2b68'
down_revision = '32b517556ec9'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin',
'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
'neutron.plugins.nec.nec_plugin.NECPluginV2',
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
'neutron.plugins.niblick.interceptor_plugin.Interceptor',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.add_column('routers', sa.Column('enable_snat', sa.Boolean(),
nullable=False, default=True))
# Set enable_snat to True for existing routers
op.execute("UPDATE routers SET enable_snat=True")
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_column('routers', 'enable_snat')
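# Hedged note (not part of the original migration): when applied through Neutron's
# migration tooling, this revision adds a NOT NULL boolean `enable_snat` column to
# the `routers` table (set to True for existing rows) and drops it on downgrade.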
|
apache-2.0
| -8,302,952,998,911,602,000
| 32.140625
| 78
| 0.728901
| false
| 3.517413
| false
| false
| false
|
amolenaar/gaphor
|
gaphor/core/modeling/coremodel.py
|
1
|
2200
|
# This file is generated by codegen.py. DO NOT EDIT!
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Callable, List, Optional
from gaphor.core.modeling.element import Element
from gaphor.core.modeling.properties import (
association,
attribute,
derived,
derivedunion,
enumeration,
redefine,
relation_many,
relation_one,
)
if TYPE_CHECKING:
from gaphor.UML import Dependency, Namespace
# 8: override Element
# defined above
# 11: override NamedElement
# Define extra attributes defined in UML model
class NamedElement(Element):
name: attribute[str]
qualifiedName: derived[List[str]]
namespace: relation_one[Namespace]
clientDependency: relation_many[Dependency]
supplierDependency: relation_many[Dependency]
class PackageableElement(NamedElement):
pass
# 55: override Diagram
# defined in gaphor.core.modeling.diagram
# 46: override Presentation
# defined in gaphor.core.modeling.presentation
class Comment(Element):
body: attribute[str]
annotatedElement: relation_many[Element]
# 40: override StyleSheet
# defined in gaphor.core.modeling.presentation
NamedElement.name = attribute("name", str)
Comment.body = attribute("body", str)
# 43: override StyleSheet.styleSheet
# defined in gaphor.core.modeling.presentation
# 52: override Presentation.subject
# defined in gaphor.core.modeling.presentation
# 49: override Element.presentation
# defined in gaphor.core.modeling.presentation
Comment.annotatedElement = association(
"annotatedElement", Element, opposite="ownedComment"
)
Element.ownedComment = association("ownedComment", Comment, opposite="annotatedElement")
# 20: override NamedElement.qualifiedName(NamedElement.namespace): derived[List[str]]
def _namedelement_qualifiedname(self) -> List[str]:
"""
Returns the qualified name of the element as a tuple
"""
if self.namespace:
return _namedelement_qualifiedname(self.namespace) + [self.name]
else:
return [self.name]
NamedElement.qualifiedName = derived(
NamedElement,
"qualifiedName",
List[str],
0,
1,
lambda obj: [_namedelement_qualifiedname(obj)],
)
|
lgpl-2.1
| 8,562,233,402,867,397,000
| 22.913043
| 88
| 0.742273
| false
| 3.826087
| false
| false
| false
|
rackerlabs/deuce-client
|
deuceclient/auth/rackspaceauth.py
|
1
|
2721
|
"""
Deuce Rackspace Authentication API
"""
import logging
import deuceclient.auth
import deuceclient.auth.openstackauth
def get_identity_apihost(datacenter):
if datacenter in ('us', 'uk', 'lon', 'iad', 'dfw', 'ord'):
return 'https://identity.api.rackspacecloud.com/v2.0'
elif datacenter in ('hkg', 'syd'):
        return 'https://{0:}.identity.api.rackspacecloud.com/v2.0'.\
format(datacenter)
else:
raise deuceclient.auth.AuthenticationError(
'Unknown Data Center: {0:}'.format(datacenter))
class RackspaceAuthentication(
deuceclient.auth.openstackauth.OpenStackAuthentication):
"""Rackspace Identity Authentication Support
Only difference between this and OpenStackAuthentication is that this
can know the servers without one being specified.
"""
def __init__(self, userid=None, usertype=None,
credentials=None, auth_method=None,
datacenter=None, auth_url=None):
# If an authentication url is not provided then create one using
# Rackspace's Identity Service for the specified datacenter
if auth_url is None:
if datacenter is None:
raise deuceclient.auth.AuthenticationError(
'Required Parameter, datacenter, not specified.')
auth_url = get_identity_apihost(datacenter)
log = logging.getLogger(__name__)
log.debug('No AuthURL specified. Using {0:}'.format(auth_url))
super(RackspaceAuthentication, self).__init__(userid=userid,
usertype=usertype,
credentials=credentials,
auth_method=auth_method,
datacenter=datacenter,
auth_url=auth_url)
@staticmethod
def _management_url(*args, **kwargs):
# NOTE(TheSriram): kwarg region_name is the datacenter supplied
# when instantiating RackspaceAuthentication class
return get_identity_apihost(kwargs['region_name'])
@staticmethod
def patch_management_url():
from keystoneclient.service_catalog import ServiceCatalog
ServiceCatalog.url_for = RackspaceAuthentication._management_url
def get_client(self):
"""Retrieve the Rackspace Client
"""
# NOTE(TheSriram): The exceptions thrown if any, would still
# bear OpenstackAuthentication class in the message.
RackspaceAuthentication.patch_management_url()
return super(RackspaceAuthentication, self).get_client()
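# --- Illustrative usage sketch (not part of the original module) ---
# The argument values below are placeholders; real credentials and the exact
# auth_method accepted depend on the deuceclient/OpenStackAuthentication
# configuration in use.
#
#   auth = RackspaceAuthentication(userid='some-user',
#                                  credentials='some-api-key',
#                                  auth_method='apikey',
#                                  datacenter='iad')
#   client = auth.get_client()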
|
apache-2.0
| -272,386,779,934,035,970
| 39.014706
| 78
| 0.609335
| false
| 4.893885
| false
| false
| false
|
lambdaloop/CIT-biosignals
|
pygame/present_images_pygame.py
|
1
|
1565
|
import pygame
from pygame.locals import *
from constants import *
from generate_images import *
import time
import pandas as pd
from pylsl import StreamInfo, StreamOutlet
import random
pygame.init()
#pygame.mouse.set_visible(False)
from screen import screen
from drawstuff import *
study_time = int(time.time())
print(study_time)
info = StreamInfo('Ganglion_EEG', 'Markers', 1, 0.0, 'int32',
'marker')
outlet = StreamOutlet(info)
images = gen_images()
def check_for_key(key=K_ESCAPE):
while True:
event = pygame.event.poll()
if event.type == 0:
return False
elif event.dict.get('key', -1) == key:
return True
def check_for_escape():
return check_for_key(K_ESCAPE)
def finish_stuff(early=False):
return
text_slide("""Start recording and
press space to continue""")
while not check_for_key(K_SPACE):
pass
focus_slide()
outlet.push_sample([-1], time.time())
time.sleep(0.5)
images = [(path, pygame.image.load(path)) for path in images]
t = time.time()
for image_path, img in images:
# if d['is_shown'] != 1:
# continue
# word = d['word']
print(time.time() - t)
t = time.time()
print(image_path, hash(image_path))
image_slide(img)
outlet.push_sample([hash(image_path)], time.time())
time.sleep(4)
if check_for_escape():
finish_stuff(early=True)
exit()
focus_slide()
outlet.push_sample([-1], time.time())
time.sleep(2.0)
if check_for_escape():
finish_stuff(early=True)
exit()
|
mit
| 3,029,757,093,882,281,000
| 19.324675
| 61
| 0.630671
| false
| 3.206967
| false
| false
| false
|
Metruption/hophacks17project
|
src/analysis.py
|
1
|
2022
|
"""
analysis.py: A wrapper for the Bark Partner API
Copyright (C) 2016 Aaron Thomas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import pybark
from bark_config import BARK_TOKEN
def check_message(message):
'''
preconditions:
@param message is a string
postconditions:
returns a boolean
True of the message is abusive
False if the message is not abusive
'''
resp = pybark.woof(BARK_TOKEN, message)
resp = json.loads(resp)
power_level = [resp['abusive'],resp['results']['sentiment'] in ["VERY_NEGATIVE", "NEGATIVE"]]
bad_varname = ["profanity", "cyberbullying"]
power_level = power_level + [resp['results'][i]['abusive'] for i in bad_varname]
return sum(power_level) >=2
def find_handle(message):
'''
preconditions:
@param message is a string
postconditions:
returns a tuple containing all of the twitter ids of any @handles given in the text of the direct message
returns a tuple containing all of the twitter @handles given in the text of the direct message
example outputs:
() none given
(aaron_the_king,,@hack,@hateishate_) three given
(@aaron_the_king,@jack) two
(@aaron_the_king) one given
'''
words = message.split(" ")
handles = (word for word in words if word.startswith('@'))
return handles
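# --- Illustrative usage sketch (not part of the original module) ---
# The sample strings below are assumptions; the booleans returned depend on
# the live Bark API response for the configured BARK_TOKEN.
if __name__ == '__main__':
    for sample in ('have a nice day @friend', 'some clearly abusive insult'):
        print(sample, '->', check_message(sample), tuple(find_handle(sample)))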
|
gpl-3.0
| -570,306,720,088,601,800
| 35.763636
| 113
| 0.670129
| false
| 4.06841
| false
| false
| false
|
yadt/yadt-config-rpm-maker
|
src/config_rpm_maker/token/treenode.py
|
1
|
1209
|
# yadt-config-rpm-maker
# Copyright (C) 2011-2013 Immobilien Scout GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class NameNotAcceptedException(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "name '%s' is not accepted, must not be empty or null" % self.name
class TreeNode:
def __init__(self, name, children=None):
if children is None:
            children = []
if name is None or len(name) == 0:
raise NameNotAcceptedException(name)
self.name = name
self.children = set(children)
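# --- Illustrative usage sketch (not part of the original module) ---
# The node names are made up; they just show how children are attached.
if __name__ == '__main__':
    root = TreeNode('hosts', children=[TreeNode('web01'), TreeNode('web02')])
    print("%s -> %s" % (root.name, sorted(child.name for child in root.children)))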
|
gpl-3.0
| 7,911,371,813,833,091,000
| 35.636364
| 81
| 0.679901
| false
| 3.86262
| false
| false
| false
|
cfobel/camip
|
camip/bin/vpr_net_to_df.py
|
1
|
2970
|
import sys
from collections import OrderedDict
import numpy as np
import pandas as pd
from path_helpers import path
from vpr_netfile_parser.VprNetParser import cVprNetFileParser
try:
profile
except NameError:
profile = lambda f: f
INPUT_DRIVER_PIN = 0
LOGIC_DRIVER_PIN = 4
LOGIC_BLOCK = 0
INPUT_BLOCK = 1
OUTPUT_BLOCK = 2
CLOCK_PIN = 5
CONNECTION_CLOCK = 5
CONNECTION_DRIVER = 200
CONNECTION_SINK = 100
CONNECTION_CLOCK_DRIVER = 30
# Connection type = DRIVER_TYPE + 10 * SINK_TYPE
DELAY_IO_TO_IO = INPUT_BLOCK + 10 * OUTPUT_BLOCK
DELAY_FB_TO_FB = LOGIC_BLOCK + 10 * LOGIC_BLOCK
DELAY_IO_TO_FB = INPUT_BLOCK + 10 * LOGIC_BLOCK
DELAY_FB_TO_IO = LOGIC_BLOCK + 10 * OUTPUT_BLOCK
@profile
def vpr_net_to_df(net_file_path):
parser = cVprNetFileParser(net_file_path)
block_labels = pd.Series(parser.block_labels)
net_labels = pd.Series(parser.net_labels)
type_labels = pd.Series(['.clb', '.input', '.output'],
index=[LOGIC_BLOCK, INPUT_BLOCK,
OUTPUT_BLOCK])
type_keys = pd.DataFrame(range(type_labels.shape[0]), dtype='uint32',
index=type_labels, columns=['type_key'])
block_type_keys = type_keys.loc[parser.block_type,
'type_key'].reset_index(drop=True)
block_to_net_ids = parser.block_to_net_ids()
net_key = np.concatenate(block_to_net_ids).astype('uint32')
block_key = np.concatenate([[i] * len(v)
for i, v in
enumerate(block_to_net_ids)]).astype('uint32')
pin_key = np.concatenate(parser.block_used_pins).astype('uint32')
connections = pd.DataFrame(OrderedDict([('net_key', net_key),
('block_key', block_key),
('pin_key', pin_key)]))
connections.insert(2, 'block_type',
block_type_keys.iloc[connections.block_key].values)
connections['net_label'] = net_labels.iloc[connections.net_key].values
connections['block_label'] = block_labels.iloc[connections.block_key].values
return connections.sort(['net_key', 'block_key']).reset_index(drop=True)
def parse_args(argv=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if argv is None:
argv = sys.argv
parser = ArgumentParser(description='Convert VPR netlist `.net` file to HDF '
'connections `.h5` format.')
parser.add_argument(dest='vpr_net_file', type=path)
parser.add_argument(dest='hdf_file', type=path)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
df_netlist = vpr_net_to_df(args.vpr_net_file)
df_netlist.to_hdf(str(args.hdf_file), '/connections', format='table',
data_columns=df_netlist.columns, complib='zlib',
complevel=6)
|
gpl-2.0
| -9,034,308,781,177,645,000
| 33.137931
| 81
| 0.606734
| false
| 3.53151
| false
| false
| false
|
khchine5/xl
|
lino_xl/lib/ledger/fixtures/demo_bookings.py
|
1
|
4927
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""
Creates fictive demo bookings with monthly purchases.
See also:
- :mod:`lino_xl.lib.finan.fixtures.demo_bookings`
- :mod:`lino_xl.lib.sales.fixtures.demo_bookings`
- :mod:`lino_xl.lib.invoicing.fixtures.demo_bookings`
"""
from __future__ import unicode_literals
import datetime
from dateutil.relativedelta import relativedelta as delta
from decimal import Decimal
from django.conf import settings
from lino.utils import Cycler
from lino.utils.dates import AMONTH
from lino.api import dd, rt
from lino_xl.lib.vat.mixins import myround
# from lino.core.requests import BaseRequest
REQUEST = settings.SITE.login() # BaseRequest()
MORE_THAN_A_MONTH = datetime.timedelta(days=40)
from lino_xl.lib.vat.choicelists import VatAreas, VatRules
from lino_xl.lib.ledger.choicelists import TradeTypes
def objects():
Journal = rt.models.ledger.Journal
PaymentTerm = rt.models.ledger.PaymentTerm
Company = rt.models.contacts.Company
USERS = Cycler(settings.SITE.user_model.objects.all())
def func():
# qs = Company.objects.filter(sepa_accounts__iban__isnull=False)
qs = Company.objects.exclude(vat_regime='').filter(
country__isnull=False)
for p in qs.order_by('id'):
# if Journal.objects.filter(partner=p).exists():
# continue
# if not p.vat_regime:
# continue
va = VatAreas.get_for_country(p.country)
if va is None:
continue
rule = VatRules.get_vat_rule(
va, TradeTypes.purchases, p.vat_regime, default=False)
if rule:
yield p
PROVIDERS = Cycler(func())
if len(PROVIDERS) == 0:
raise Exception("No providers.")
JOURNAL_P = Journal.objects.get(ref="PRC")
if dd.is_installed('ana'):
ANA_ACCS = Cycler(rt.models.ana.Account.objects.all())
ACCOUNTS = Cycler(JOURNAL_P.get_allowed_accounts())
AMOUNTS = Cycler([Decimal(x) for x in
"20 29.90 39.90 99.95 199.95 599.95 1599.99".split()])
AMOUNT_DELTAS = Cycler([Decimal(x)
for x in "0 0.60 1.10 1.30 2.50".split()])
DATE_DELTAS = Cycler((1, 2, 3, 4, 5, 6, 7))
INFLATION_RATE = Decimal("0.02")
""""purchase stories" : each story represents a provider who sends
monthly invoices.
"""
PURCHASE_STORIES = []
for i in range(7):
# provider, (account,amount)
story = (PROVIDERS.pop(), [])
story[1].append((ACCOUNTS.pop(), AMOUNTS.pop()))
if i % 3:
story[1].append((ACCOUNTS.pop(), AMOUNTS.pop()))
PURCHASE_STORIES.append(story)
START_YEAR = dd.plugins.ledger.start_year
date = datetime.date(START_YEAR, 1, 1)
end_date = settings.SITE.demo_date(-10) # + delta(years=-2)
# end_date = datetime.date(START_YEAR+1, 5, 1)
# print(20151216, START_YEAR, settings.SITE.demo_date(), end_date - date)
PAYMENT_TERMS = Cycler(PaymentTerm.objects.all())
if len(PAYMENT_TERMS) == 0:
raise Exception("No PAYMENT_TERMS.")
while date < end_date:
for story in PURCHASE_STORIES:
vd = date + delta(days=DATE_DELTAS.pop())
if dd.is_installed('ana'):
cl = rt.models.ana.AnaAccountInvoice
else:
cl = rt.models.vat.VatAccountInvoice
invoice = cl(
journal=JOURNAL_P, partner=story[0], user=USERS.pop(),
voucher_date=vd,
payment_term=PAYMENT_TERMS.pop(),
entry_date=vd + delta(days=1))
yield invoice
for account, amount in story[1]:
kwargs = dict()
if dd.is_installed('ana'):
if account.needs_ana:
kwargs.update(ana_account=ANA_ACCS.pop())
model = rt.models.ana.InvoiceItem
else:
model = rt.models.vat.InvoiceItem
amount += amount + \
(amount * INFLATION_RATE * (date.year - START_YEAR))
item = model(voucher=invoice,
account=account,
total_incl=myround(amount) +
AMOUNT_DELTAS.pop(), **kwargs)
try:
item.total_incl_changed(REQUEST)
except Exception as e:
msg = "20171006 {} in ({} {!r})".format(
e, invoice.partner, invoice.vat_regime)
# raise Exception(msg)
dd.logger.warning(msg)
else:
item.before_ui_save(REQUEST)
yield item
invoice.register(REQUEST)
invoice.save()
date += AMONTH
|
bsd-2-clause
| -259,979,154,959,346,780
| 33.697183
| 77
| 0.56505
| false
| 3.62813
| false
| false
| false
|
AeroNotix/django-timetracker
|
tracker/management/commands/approval_reminders.py
|
1
|
1376
|
'''
Simple module to aid in command-line debugging of notification related issues.
'''
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.mail import EmailMessage
from timetracker.overtime.models import PendingApproval, Tbluser
import logging
# Module-level logger used below when a clear approval chain cannot be found.
error_log = logging.getLogger(__name__)
def send_approval_digest(market):
approvals = PendingApproval.objects.filter(closed=False, approver__market=market)
if not len(approvals):
return
if len({entry.approver for entry in approvals}) > 1:
error_log.critical(
"Cannot send e-mails as a clear approval chain cannot be established."
)
return
message = "Hi,\n\n" \
"You have %d approvals pending in the timetracker." \
"\n\n" \
"Kind Regards,\n" \
"Timetracker team"
message = message % len(approvals)
email = EmailMessage(from_email='timetracker@unmonitored.com')
email.body = message
email.to = approvals[0].entry.user.get_manager_email()
email.subject = "Pending Approvals in the Timetracker."
email.send()
class Command(BaseCommand):
def handle(self, *args, **options):
for market in Tbluser.MARKET_CHOICES:
if settings.SENDING_APPROVAL_DIGESTS.get(market[0]):
send_approval_digest(market[0])
|
bsd-3-clause
| 6,090,599,434,145,423,000
| 31.761905
| 86
| 0.646802
| false
| 3.988406
| false
| false
| false
|
digitalhealthhack/is_it_good_for_me
|
data_scripts/get_studies.py
|
1
|
3019
|
from requests import get as get_page
from bs4 import BeautifulSoup
import csv
import codecs
import cStringIO
TOPIC = 'chocolate'
def _get_study_url(url):
# Receives the url of a cochrane search result and returns the url for the
# study
result_page = get_page(url)
result_soup = BeautifulSoup(result_page.text)
study_url = result_soup \
.find(id='node_review_full_group_research') \
.find_all('a')[0] \
.get('href')
return study_url
def _get_info_for_study(study_url):
study_page = get_page(study_url)
soup = BeautifulSoup(study_page.text)
study_title = soup.find(class_='articleTitle').span.text
abstract_html = soup.find(id='mrwFulltext').div.find_all(['p', 'div'])
abstract_text = u''
authors_conclusions = u''
is_capturing = False
for html in abstract_html:
if is_capturing and html.name != 'p' and html.text != 'Authors\' conclusions':
is_capturing = False
break
abstract_text += unicode(html.text)
if is_capturing:
authors_conclusions += unicode(html.text)
if html.name != 'p' and html.text == 'Authors\' conclusions':
is_capturing = True
return (study_title, authors_conclusions, abstract_text)
def main(search_query=''):
req = get_page(
'http://summaries.cochrane.org/search/site/{}'.format(search_query),
)
soup = BeautifulSoup(req.text)
results = soup.find_all(class_='search-result')
studies = []
for result in results:
result_url = result.a.get('href')
if result_url:
study_url = _get_study_url(result_url)
study_title, study_conclusion, study_abstract = \
_get_info_for_study(study_url)
studies.append([study_title, study_conclusion, study_abstract])
filename = 'studies.csv'
with open(filename, 'w') as csv_file:
for study in studies:
spamwriter = UnicodeWriter(csv_file)
spamwriter.writerow(study)
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
main(TOPIC)
|
mit
| -463,493,121,653,917,700
| 27.752381
| 86
| 0.613448
| false
| 3.659394
| false
| false
| false
|
EliCDavis/PyChart
|
src/ColorSelector.py
|
1
|
1167
|
import math
import random
class ColorSelector:
def __init__(self):
self._colors_for_selection = ['F94F48', 'FF6A41', 'B4B4B4', 'D5D5D5', 'E973F5', '237FEA',
'F2B838', '19EC5A', '2395DE', 'D4B57F', 'FFD700']
self._colors_already_selected = []
def get_random_color(self):
index = math.floor(random.random()*len(self._colors_for_selection))
index_has_been_found = False
# Keep trying to find an index until we're successful
# TODO this needs to be way more efficient
while index_has_been_found is False:
if index not in self._colors_already_selected:
index_has_been_found = True
else:
index = math.floor(random.random()*len(self._colors_for_selection))
# Finally get our color
color = self._colors_for_selection[index]
self._colors_already_selected.append(index)
# If we've used all the colors then start all over
if len(self._colors_already_selected) == len(self._colors_for_selection):
self._colors_already_selected = []
return color
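# --- Illustrative usage sketch (not part of the original module) ---
# Assumes Python 3, where math.floor() returns an int usable as a list index.
if __name__ == '__main__':
    selector = ColorSelector()
    print([selector.get_random_color() for _ in range(3)])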
|
mit
| -6,649,144,329,297,534,000
| 28.175
| 97
| 0.59126
| false
| 3.728435
| false
| false
| false
|
justb4/GeoHealthCheck
|
GeoHealthCheck/app.py
|
1
|
38830
|
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Just van den Broecke <justb4@gmail.com>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import base64
import csv
import json
import logging
from io import StringIO
from flask import (abort, flash, g, jsonify, redirect,
render_template, request, url_for)
from flask_babel import gettext
from flask_login import (LoginManager, login_user, logout_user,
current_user, login_required)
from flask_migrate import Migrate
from itertools import chain
import views
from __init__ import __version__
from enums import RESOURCE_TYPES
from factory import Factory
from init import App
from models import Resource, Run, ProbeVars, CheckVars, Tag, User, Recipient
from resourceauth import ResourceAuth
from util import send_email, geocode, format_checked_datetime, \
format_run_status, format_obj_value
# Module globals for convenience
LOGGER = logging.getLogger(__name__)
APP = App.get_app()
CONFIG = App.get_config()
DB = App.get_db()
BABEL = App.get_babel()
MIGRATE = Migrate(APP, DB)
LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.init_app(APP)
LANGUAGES = (
('en', 'English'),
('fr', 'Français'),
('de', 'German'),
('nl_NL', 'Nederlands (Nederland)'),
('es_BO', 'Español (Bolivia)'),
('hr_HR', 'Croatian (Croatia)')
)
# Should GHC Runner be run within GHC webapp?
if CONFIG['GHC_RUNNER_IN_WEBAPP'] is True:
LOGGER.info('Running GHC Scheduler in WebApp')
from scheduler import start_schedule
# Start scheduler
start_schedule()
else:
LOGGER.info('NOT Running GHC Scheduler in WebApp')
# commit or rollback shorthand
def db_commit():
err = None
try:
DB.session.commit()
    except Exception as exc:
        DB.session.rollback()
        err = exc
# finally:
# DB.session.close()
return err
@APP.before_request
def before_request():
g.user = current_user
if request.args and 'lang' in request.args and request.args['lang'] != '':
g.current_lang = request.args['lang']
if not hasattr(g, 'current_lang'):
g.current_lang = 'en'
if CONFIG['GHC_REQUIRE_WEBAPP_AUTH'] is True:
# Login is required to access GHC Webapp.
# We need to pass-through static resources like CSS.
if any(['/static/' in request.path,
request.path.endswith('.ico'),
g.user.is_authenticated(), # This is from Flask-Login
(request.endpoint is not None
and getattr(APP.view_functions[request.endpoint],
'is_public', False))]):
return # Access granted
else:
return redirect(url_for('login'))
# Marks (endpoint-) function as always to be accessible
# (used for GHC_REQUIRE_WEBAPP_AUTH)
def public_route(decorated_function):
decorated_function.is_public = True
return decorated_function
@APP.teardown_appcontext
def shutdown_session(exception=None):
DB.session.remove()
@BABEL.localeselector
def get_locale():
return g.get('current_lang', 'en')
# return request.accept_languages.best_match(LANGUAGES.keys())
@LOGIN_MANAGER.user_loader
def load_user(identifier):
return User.query.get(int(identifier))
@LOGIN_MANAGER.unauthorized_handler
def unauthorized_callback():
if request.query_string:
url = '%s%s?%s' % (request.script_root, request.path,
request.query_string)
else:
url = '%s%s' % (request.script_root, request.path)
return redirect(url_for('login', lang=g.current_lang, next=url))
@LOGIN_MANAGER.request_loader
def load_user_from_request(request):
# Try to login using Basic Auth
# Inspiration: https://flask-login.readthedocs.io
# /en/latest/#custom-login-using-request-loader
basic_auth_val = request.headers.get('Authorization')
if basic_auth_val:
basic_auth_val = basic_auth_val.replace('Basic ', '', 1)
        authenticated = False
        username = None
        try:
            username, password = base64.b64decode(basic_auth_val).split(':')
            user = User.query.filter_by(username=username).first()
            if user:
                authenticated = user.authenticate(password)
        except Exception:
            # Ignore errors, they should all fail the auth attempt
            pass
if not authenticated:
LOGGER.warning('Unauthorized access for user=%s' % username)
abort(401)
else:
return user
# TODO: may add login via api-key or token here
# finally, return None if both methods did not login the user
return None
@APP.template_filter('cssize_reliability')
def cssize_reliability(value, css_type=None):
"""returns CSS button class snippet based on score"""
number = int(value)
if CONFIG['GHC_RELIABILITY_MATRIX']['red']['min'] <= number <= \
CONFIG['GHC_RELIABILITY_MATRIX']['red']['max']:
score = 'danger'
panel = 'red'
elif (CONFIG['GHC_RELIABILITY_MATRIX']['orange']['min'] <= number <=
CONFIG['GHC_RELIABILITY_MATRIX']['orange']['max']):
score = 'warning'
panel = 'yellow'
elif (CONFIG['GHC_RELIABILITY_MATRIX']['green']['min'] <= number <=
CONFIG['GHC_RELIABILITY_MATRIX']['green']['max']):
score = 'success'
panel = 'green'
else: # should never really get here
score = 'info'
panel = 'blue'
if css_type is not None and css_type == 'panel':
return panel
else:
return score
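# For illustration (assumed Jinja2 template usage), the filter above would be
# applied roughly as
#     <span class="label label-{{ resource.reliability | cssize_reliability }}">
# with css_type='panel' when a Bootstrap panel colour is wanted instead.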
@APP.template_filter('cssize_reliability2')
def cssize_reliability2(value):
"""returns CSS panel class snippet based on score"""
return cssize_reliability(value, 'panel')
@APP.template_filter('round2')
def round2(value):
"""rounds a number to 2 decimal places except for values of 0 or 100"""
if value in [0.0, 100.0]:
return int(value)
return round(value, 2)
@APP.context_processor
def context_processors():
"""global context processors for templates"""
rtc = views.get_resource_types_counts()
tags = views.get_tag_counts()
return {
'app_version': __version__,
'resource_types': RESOURCE_TYPES,
'resource_types_counts': rtc['counts'],
'resources_total': rtc['total'],
'languages': LANGUAGES,
'tags': tags,
'tagnames': list(tags.keys())
}
@APP.route('/')
def home():
"""homepage"""
response = views.get_health_summary()
return render_template('home.html', response=response)
@APP.route('/csv', endpoint='csv')
@APP.route('/json', endpoint='json')
def export():
"""export resource list as JSON"""
resource_type = None
if request.args.get('resource_type') in RESOURCE_TYPES.keys():
resource_type = request.args['resource_type']
query = request.args.get('q')
response = views.list_resources(resource_type, query)
if request.url_rule.rule == '/json':
json_dict = {'total': response['total'], 'resources': []}
for r in response['resources']:
try:
ghc_url = '%s/resource/%s' % \
(CONFIG['GHC_SITE_URL'], r.identifier)
last_run_report = '-'
if r.last_run:
last_run_report = r.last_run.report
json_dict['resources'].append({
'resource_type': r.resource_type,
'title': r.title,
'url': r.url,
'ghc_url': ghc_url,
'ghc_json': '%s/json' % ghc_url,
'ghc_csv': '%s/csv' % ghc_url,
'first_run': format_checked_datetime(r.first_run),
'last_run': format_checked_datetime(r.last_run),
'status': format_run_status(r.last_run),
'min_response_time': round(r.min_response_time, 2),
'average_response_time': round(r.average_response_time, 2),
'max_response_time': round(r.max_response_time, 2),
'reliability': round(r.reliability, 2),
'last_report': format_obj_value(last_run_report)
})
except Exception as e:
LOGGER.warning(
'JSON error resource id=%d: %s' % (r.identifier, str(e)))
return jsonify(json_dict)
elif request.url_rule.rule == '/csv':
output = StringIO()
writer = csv.writer(output)
header = [
'resource_type', 'title', 'url', 'ghc_url', 'ghc_json', 'ghc_csv',
'first_run', 'last_run', 'status', 'min_response_time',
'average_response_time', 'max_response_time', 'reliability'
]
writer.writerow(header)
for r in response['resources']:
try:
ghc_url = '%s%s' % (CONFIG['GHC_SITE_URL'],
url_for('get_resource_by_id',
identifier=r.identifier))
writer.writerow([
r.resource_type,
r.title,
r.url,
ghc_url,
'%s/json' % ghc_url,
'%s/csv' % ghc_url,
format_checked_datetime(r.first_run),
format_checked_datetime(r.last_run),
format_run_status(r.last_run),
round(r.min_response_time, 2),
round(r.average_response_time, 2),
round(r.max_response_time, 2),
round(r.reliability, 2)
])
except Exception as e:
LOGGER.warning(
'CSV error resource id=%d: %s' % (r.identifier, str(e)))
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/opensearch')
def opensearch():
"""generate OpenSearch description document"""
content = render_template('opensearch_description.xml')
return content, 200, {'Content-type': 'text/xml'}
@APP.route('/resource/<identifier>/csv', endpoint='csv-resource')
@APP.route('/resource/<identifier>/json', endpoint='json-resource')
def export_resource(identifier):
"""export resource as JSON or CSV"""
resource = views.get_resource_by_id(identifier)
history_csv = '%s/resource/%s/history/csv' % (CONFIG['GHC_SITE_URL'],
resource.identifier)
history_json = '%s/resource/%s/history/json' % (CONFIG['GHC_SITE_URL'],
resource.identifier)
if 'json' in request.url_rule.rule:
last_run_report = '-'
if resource.last_run:
last_run_report = resource.last_run.report
json_dict = {
'identifier': resource.identifier,
'title': resource.title,
'url': resource.url,
'resource_type': resource.resource_type,
'owner': resource.owner.username,
'min_response_time': resource.min_response_time,
'average_response_time': resource.average_response_time,
'max_response_time': resource.max_response_time,
'reliability': resource.reliability,
'status': format_run_status(resource.last_run),
'first_run': format_checked_datetime(resource.first_run),
'last_run': format_checked_datetime(resource.last_run),
'history_csv': history_csv,
'history_json': history_json,
'last_report': format_obj_value(last_run_report)
}
return jsonify(json_dict)
elif 'csv' in request.url_rule.rule:
output = StringIO()
writer = csv.writer(output)
header = [
'identifier', 'title', 'url', 'resource_type', 'owner',
'min_response_time', 'average_response_time', 'max_response_time',
'reliability', 'status', 'first_run', 'last_run', 'history_csv',
'history_json'
]
writer.writerow(header)
writer.writerow([
resource.identifier,
resource.title,
resource.url,
resource.resource_type,
resource.owner.username,
resource.min_response_time,
resource.average_response_time,
resource.max_response_time,
resource.reliability,
format_run_status(resource.last_run),
format_checked_datetime(resource.first_run),
format_checked_datetime(resource.last_run),
history_csv,
history_json
])
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/resource/<identifier>/history/csv',
endpoint='csv-resource-history')
@APP.route('/resource/<identifier>/history/json',
endpoint='json-resource-history')
def export_resource_history(identifier):
"""export resource history as JSON or CSV"""
resource = views.get_resource_by_id(identifier)
if 'json' in request.url_rule.rule:
json_dict = {'runs': []}
for run in resource.runs:
json_dict['runs'].append({
'owner': resource.owner.username,
'resource_type': resource.resource_type,
'checked_datetime': format_checked_datetime(run),
'title': resource.title,
'url': resource.url,
'response_time': round(run.response_time, 2),
'status': format_run_status(run)
})
return jsonify(json_dict)
elif 'csv' in request.url_rule.rule:
output = StringIO()
writer = csv.writer(output)
header = [
'owner', 'resource_type', 'checked_datetime', 'title', 'url',
'response_time', 'status'
]
writer.writerow(header)
for run in resource.runs:
writer.writerow([
resource.owner.username,
resource.resource_type,
format_checked_datetime(run),
resource.title,
resource.url,
round(run.response_time, 2),
format_run_status(run),
])
return output.getvalue(), 200, {'Content-type': 'text/csv'}
@APP.route('/settings')
def settings():
"""settings"""
pass
@APP.route('/resources')
def resources():
"""lists resources with optional filter"""
resource_type = None
if request.args.get('resource_type') in RESOURCE_TYPES.keys():
resource_type = request.args['resource_type']
tag = request.args.get('tag')
query = request.args.get('q')
response = views.list_resources(resource_type, query, tag)
return render_template('resources.html', response=response)
@APP.route('/resource/<identifier>')
def get_resource_by_id(identifier):
"""show resource"""
response = views.get_resource_by_id(identifier)
return render_template('resource.html', resource=response)
@APP.route('/register', methods=['GET', 'POST'])
def register():
"""register a new user"""
if not CONFIG['GHC_SELF_REGISTER']:
msg1 = gettext('This site is not configured for self-registration')
msg2 = gettext('Please contact')
msg = '%s. %s %s' % (msg1, msg2,
CONFIG['GHC_ADMIN_EMAIL'])
flash('%s' % msg, 'danger')
return render_template('register.html', errmsg=msg)
if request.method == 'GET':
return render_template('register.html')
# Check for existing user or email
user = User.query.filter_by(username=request.form['username']).first()
email = User.query.filter_by(email=request.form['email']).first()
if user or email:
flash('%s' % gettext('Invalid username or email'), 'danger')
return render_template('register.html')
user = User(request.form['username'],
request.form['password'], request.form['email'])
DB.session.add(user)
try:
DB.session.commit()
except Exception as err:
DB.session.rollback()
        bad_column = str(err).split()[2]
bad_value = request.form[bad_column]
msg = gettext('already registered')
flash('%s %s %s' % (bad_column, bad_value, msg), 'danger')
return redirect(url_for('register', lang=g.current_lang))
return redirect(url_for('login', lang=g.current_lang))
@APP.route('/add', methods=['GET', 'POST'])
@login_required
def add():
"""add resource"""
if not g.user.is_authenticated():
return render_template('add.html')
if request.method == 'GET':
return render_template('add.html')
resource_type = request.form['resource_type']
tags = request.form.getlist('tags')
url = request.form['url'].strip()
resources_to_add = []
from healthcheck import sniff_test_resource, run_test_resource
sniffed_resources = sniff_test_resource(CONFIG, resource_type, url)
if not sniffed_resources:
msg = gettext("No resources detected")
        LOGGER.warning(msg)
flash(msg, 'danger')
for (resource_type, resource_url,
title, success, response_time,
message, start_time, resource_tags,) in sniffed_resources:
tags_to_add = []
for tag in chain(tags, resource_tags):
tag_obj = tag
if not isinstance(tag, Tag):
tag_obj = Tag.query.filter_by(name=tag).first()
if tag_obj is None:
tag_obj = Tag(name=tag)
tags_to_add.append(tag_obj)
resource_to_add = Resource(current_user,
resource_type,
title,
resource_url,
tags=tags_to_add)
resources_to_add.append(resource_to_add)
probe_to_add = None
checks_to_add = []
# Always add a default Probe and Check(s)
# from the GHC_PROBE_DEFAULTS conf
if resource_type in CONFIG['GHC_PROBE_DEFAULTS']:
resource_settings = CONFIG['GHC_PROBE_DEFAULTS'][resource_type]
probe_class = resource_settings['probe_class']
if probe_class:
# Add the default Probe
probe_obj = Factory.create_obj(probe_class)
probe_to_add = ProbeVars(
resource_to_add, probe_class,
probe_obj.get_default_parameter_values())
# Add optional default (parameterized)
# Checks to add to this Probe
checks_info = probe_obj.get_checks_info()
checks_param_info = probe_obj.get_plugin_vars()['CHECKS_AVAIL']
for check_class in checks_info:
check_param_info = checks_param_info[check_class]
if 'default' in checks_info[check_class]:
if checks_info[check_class]['default']:
# Filter out params for Check with fixed values
param_defs = check_param_info['PARAM_DEFS']
param_vals = {}
for param in param_defs:
if param_defs[param]['value']:
param_vals[param] = \
param_defs[param]['value']
check_vars = CheckVars(
probe_to_add, check_class, param_vals)
checks_to_add.append(check_vars)
result = run_test_resource(resource_to_add)
run_to_add = Run(resource_to_add, result)
DB.session.add(resource_to_add)
# prepopulate notifications for current user
resource_to_add.set_recipients('email', [g.user.email])
if probe_to_add:
DB.session.add(probe_to_add)
for check_to_add in checks_to_add:
DB.session.add(check_to_add)
DB.session.add(run_to_add)
try:
DB.session.commit()
msg = gettext('Services registered')
flash('%s (%s, %s)' % (msg, resource_type, url), 'success')
except Exception as err:
DB.session.rollback()
flash(str(err), 'danger')
return redirect(url_for('home', lang=g.current_lang))
if len(resources_to_add) == 1:
return edit_resource(resources_to_add[0].identifier)
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/resource/<int:resource_identifier>/update', methods=['POST'])
@login_required
def update(resource_identifier):
"""update a resource"""
update_counter = 0
status = 'success'
try:
resource_identifier_dict = request.get_json()
resource = Resource.query.filter_by(
identifier=resource_identifier).first()
for key, value in resource_identifier_dict.items():
if key == 'tags':
resource_tags = [t.name for t in resource.tags]
tags_to_add = set(value) - set(resource_tags)
tags_to_delete = set(resource_tags) - set(value)
# Existing Tags: create relation else add new Tag
all_tag_objs = Tag.query.all()
for tag in tags_to_add:
tag_add_obj = None
for tag_obj in all_tag_objs:
if tag == tag_obj.name:
# use existing
tag_add_obj = tag_obj
break
if not tag_add_obj:
# add new
tag_add_obj = Tag(name=tag)
DB.session.add(tag_add_obj)
resource.tags.append(tag_add_obj)
for tag in tags_to_delete:
tag_to_delete = Tag.query.filter_by(name=tag).first()
resource.tags.remove(tag_to_delete)
update_counter += 1
elif key == 'probes':
# Remove all existing ProbeVars for Resource
                for probe_var in list(resource.probe_vars):
resource.probe_vars.remove(probe_var)
# Add ProbeVars anew each with optional CheckVars
for probe in value:
LOGGER.info('adding Probe class=%s parms=%s' %
(probe['probe_class'], str(probe)))
probe_vars = ProbeVars(resource, probe['probe_class'],
probe['parameters'])
for check in probe['checks']:
check_vars = CheckVars(
probe_vars, check['check_class'],
check['parameters'])
probe_vars.check_vars.append(check_vars)
resource.probe_vars.append(probe_vars)
update_counter += 1
elif key == 'notify_emails':
resource.set_recipients('email',
[v for v in value if v.strip()])
elif key == 'notify_webhooks':
resource.set_recipients('webhook',
[v for v in value if v.strip()])
elif key == 'auth':
resource.auth = value
elif getattr(resource, key) != resource_identifier_dict[key]:
# Update other resource attrs, mainly 'name'
setattr(resource, key, resource_identifier_dict[key])
min_run_freq = CONFIG['GHC_MINIMAL_RUN_FREQUENCY_MINS']
if int(resource.run_frequency) < min_run_freq:
resource.run_frequency = min_run_freq
update_counter += 1
# Always update geo-IP: maybe failure on creation or
# IP-address of URL may have changed.
latitude, longitude = geocode(resource.url)
if latitude != 0.0 and longitude != 0.0:
# Only update for valid lat/lon
resource.latitude = latitude
resource.longitude = longitude
update_counter += 1
except Exception as err:
LOGGER.error("Cannot update resource: %s", err, exc_info=err)
DB.session.rollback()
status = str(err)
update_counter = 0
# finally:
# DB.session.close()
if update_counter > 0:
err = db_commit()
if err:
status = str(err)
return jsonify({'status': status})
@APP.route('/resource/<int:resource_identifier>/test', methods=['GET', 'POST'])
@login_required
def test(resource_identifier):
"""test a resource"""
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(request.referrer)
from healthcheck import run_test_resource
result = run_test_resource(
resource)
if request.method == 'GET':
if result.message == 'Skipped':
msg = gettext('INFO')
flash('%s: %s' % (msg, result.message), 'info')
elif result.message not in ['OK', None, 'None']:
msg = gettext('ERROR')
flash('%s: %s' % (msg, result.message), 'danger')
else:
flash(gettext('Resource tested successfully'), 'success')
return redirect(url_for('get_resource_by_id', lang=g.current_lang,
identifier=resource_identifier))
elif request.method == 'POST':
return jsonify(result.get_report())
@APP.route('/resource/<int:resource_identifier>/edit')
@login_required
def edit_resource(resource_identifier):
"""edit a resource"""
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(request.referrer)
probes_avail = views.get_probes_avail(resource.resource_type, resource)
suggestions = json.dumps(Recipient.get_suggestions('email',
g.user.username))
return render_template('edit_resource.html',
lang=g.current_lang,
resource=resource,
suggestions=suggestions,
auths_avail=ResourceAuth.get_auth_defs(),
probes_avail=probes_avail)
@APP.route('/resource/<int:resource_identifier>/delete')
@login_required
def delete(resource_identifier):
"""delete a resource"""
resource = Resource.query.filter_by(identifier=resource_identifier).first()
if g.user.role != 'admin' and g.user.username != resource.owner.username:
msg = gettext('You do not have access to delete this resource')
flash(msg, 'danger')
return redirect(url_for('get_resource_by_id', lang=g.current_lang,
identifier=resource_identifier))
if resource is None:
flash(gettext('Resource not found'), 'danger')
return redirect(url_for('home', lang=g.current_lang))
resource.clear_recipients()
DB.session.delete(resource)
try:
DB.session.commit()
flash(gettext('Resource deleted'), 'success')
return redirect(url_for('home', lang=g.current_lang))
except Exception as err:
DB.session.rollback()
flash(str(err), 'danger')
return redirect(url_for(request.referrer))
@APP.route('/probe/<string:probe_class>/<int:resource_identifier>/edit_form')
@APP.route('/probe/<string:probe_class>/edit_form')
@login_required
def get_probe_edit_form(probe_class, resource_identifier=None):
"""get the form to edit a Probe"""
probe_obj = Factory.create_obj(probe_class)
if resource_identifier:
resource = views.get_resource_by_id(resource_identifier)
if resource:
probe_obj._resource = resource
probe_obj.expand_params(resource)
probe_info = probe_obj.get_plugin_vars()
probe_vars = ProbeVars(
None, probe_class, probe_obj.get_default_parameter_values())
# Get only the default Checks for this Probe class
checks_avail = probe_obj.get_checks_info_defaults()
checks_avail = probe_obj.expand_check_vars(checks_avail)
for check_class in checks_avail:
check_obj = Factory.create_obj(check_class)
check_params = check_obj.get_default_parameter_values()
probe_check_param_defs = \
probe_info['CHECKS_AVAIL'][check_class]['PARAM_DEFS']
for param in probe_check_param_defs:
if 'value' in probe_check_param_defs[param]:
check_params[param] = probe_check_param_defs[param]['value']
# Appends 'check_vars' to 'probe_vars' (SQLAlchemy)
CheckVars(probe_vars, check_class, check_params)
return render_template('includes/probe_edit_form.html',
lang=g.current_lang,
probe=probe_vars, probe_info=probe_info)
@APP.route('/check/<string:check_class>/edit_form')
@login_required
def get_check_edit_form(check_class):
"""get the form to edit a Check"""
check_obj = Factory.create_obj(check_class)
check_info = check_obj.get_plugin_vars()
check_vars = CheckVars(
None, check_class, check_obj.get_default_parameter_values())
return render_template('includes/check_edit_form.html',
lang=g.current_lang,
check=check_vars, check_info=check_info)
@APP.route('/login', methods=['GET', 'POST'])
@public_route
def login():
"""login"""
if request.method == 'GET':
return render_template('login.html')
username = request.form['username']
password = request.form['password']
registered_user = User.query.filter_by(username=username).first()
authenticated = False
if registered_user:
# May not have upgraded to pw encryption: warn
if len(registered_user.password) < 80:
msg = 'Please upgrade GHC to encrypted passwords first, see docs!'
flash(gettext(msg), 'danger')
return redirect(url_for('login', lang=g.current_lang))
try:
authenticated = registered_user.authenticate(password)
finally:
pass
if not authenticated:
flash(gettext('Invalid username and / or password'), 'danger')
return redirect(url_for('login', lang=g.current_lang))
# Login ok
login_user(registered_user)
if 'next' in request.args:
return redirect(request.args.get('next'))
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/logout')
def logout():
"""logout"""
logout_user()
flash(gettext('Logged out'), 'success')
if request.referrer:
return redirect(request.referrer)
else:
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/reset_req', methods=['GET', 'POST'])
@public_route
def reset_req():
"""
Reset password request handling.
"""
if request.method == 'GET':
return render_template('reset_password_request.html')
# Reset request form with email
email = request.form['email']
registered_user = User.query.filter_by(email=email).first()
if registered_user is None:
LOGGER.warn('Invalid email for reset_req: %s' % email)
flash(gettext('Invalid email'), 'danger')
return redirect(url_for('reset_req', lang=g.current_lang))
# Generate reset url using user-specific token
token = registered_user.get_token()
reset_url = '%s/reset/%s' % (CONFIG['GHC_SITE_URL'], token)
# Create message body with reset link
msg_body = render_template('reset_password_email.txt',
lang=g.current_lang, config=CONFIG,
reset_url=reset_url,
username=registered_user.username)
try:
from email.mime.text import MIMEText
from email.utils import formataddr
msg = MIMEText(msg_body, 'plain', 'utf-8')
msg['From'] = formataddr((CONFIG['GHC_SITE_TITLE'],
CONFIG['GHC_ADMIN_EMAIL']))
msg['To'] = registered_user.email
msg['Subject'] = '[%s] %s' % (CONFIG['GHC_SITE_TITLE'],
gettext('reset password'))
from_addr = '%s <%s>' % (CONFIG['GHC_SITE_TITLE'],
CONFIG['GHC_ADMIN_EMAIL'])
to_addr = registered_user.email
msg_text = msg.as_string()
send_email(CONFIG['GHC_SMTP'], from_addr, to_addr, msg_text)
except Exception as err:
msg = 'Cannot send email. Contact admin: '
LOGGER.warn(msg + ' err=' + str(err))
flash(gettext(msg) + CONFIG['GHC_ADMIN_EMAIL'], 'danger')
return redirect(url_for('login', lang=g.current_lang))
flash(gettext('Password reset link sent via email'), 'success')
if 'next' in request.args:
return redirect(request.args.get('next'))
return redirect(url_for('home', lang=g.current_lang))
@APP.route('/reset/<token>', methods=['GET', 'POST'])
@public_route
def reset(token=None):
"""
Reset password submit form handling.
"""
# Must have at least a token to proceed.
if token is None:
return redirect(url_for('reset_req', lang=g.current_lang))
# Token received: verify if ok, may also time-out.
registered_user = User.verify_token(token)
if registered_user is None:
LOGGER.warn('Cannot find User from token: %s' % token)
flash(gettext('Invalid token'), 'danger')
return redirect(url_for('login', lang=g.current_lang))
# Token and user ok: return reset form.
if request.method == 'GET':
return render_template('reset_password_form.html')
# Valid token and user: change password from form-value
password = request.form['password']
if not password:
flash(gettext('Password required'), 'danger')
return redirect(url_for('reset/%s' % token, lang=g.current_lang))
registered_user.set_password(password)
DB.session.add(registered_user)
try:
DB.session.commit()
flash(gettext('Update password OK'), 'success')
except Exception as err:
msg = 'Update password failed!'
LOGGER.warn(msg + ' err=' + str(err))
DB.session.rollback()
flash(gettext(msg), 'danger')
# Finally redirect user to login page
return redirect(url_for('login', lang=g.current_lang))
#
# REST Interface Calls
#
@APP.route('/api/v1.0/summary')
@APP.route('/api/v1.0/summary/')
@APP.route('/api/v1.0/summary.<content_type>')
def api_summary(content_type='json'):
"""
Get health summary for all Resources within this instance.
"""
health_summary = views.get_health_summary()
# Convert Runs to dict-like structure
for run in ['first_run', 'last_run']:
run_obj = health_summary.get(run, None)
if run_obj:
health_summary[run] = run_obj.for_json()
# Convert Resources failing to dict-like structure
failed_resources = []
for resource in health_summary['failed_resources']:
failed_resources.append(resource.for_json())
health_summary['failed_resources'] = failed_resources
if content_type == 'json':
result = jsonify(health_summary)
else:
result = '<pre>\n%s\n</pre>' % \
render_template('status_report_email.txt',
lang=g.current_lang, summary=health_summary)
return result
@APP.route('/api/v1.0/probes-avail/')
@APP.route('/api/v1.0/probes-avail/<resource_type>')
@APP.route('/api/v1.0/probes-avail/<resource_type>/<int:resource_id>')
def api_probes_avail(resource_type=None, resource_id=None):
"""
Get available (configured) Probes for this
installation, optional for resource type
"""
resource = None
if resource_id:
resource = views.get_resource_by_id(resource_id)
probes = views.get_probes_avail(resource_type=resource_type,
resource=resource)
return jsonify(probes)
@APP.route('/api/v1.0/runs/<int:resource_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>.<content_type>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>')
@APP.route('/api/v1.0/runs/<int:resource_id>/<int:run_id>.<content_type>')
def api_runs(resource_id, run_id=None, content_type='json'):
"""
Get Runs (History of results) for Resource.
"""
if run_id:
runs = [views.get_run_by_id(run_id)]
else:
runs = views.get_run_by_resource_id(resource_id)
run_arr = []
for run in runs:
run_dict = {
'id': run.identifier,
'success': run.success,
'response_time': run.response_time,
'checked_datetime': run.checked_datetime,
'message': run.message,
'report': run.report
}
run_arr.append(run_dict)
runs_dict = {'total': len(run_arr), 'runs': run_arr}
result = 'unknown'
if content_type == 'json':
result = jsonify(runs_dict)
elif content_type == 'html':
result = render_template('includes/runs.html',
lang=g.current_lang, runs=runs_dict['runs'])
return result
if __name__ == '__main__': # run locally, for fun
import sys
HOST = '0.0.0.0'
PORT = 8000
if len(sys.argv) > 1:
HOST, PORT = sys.argv[1].split(':')
APP.run(host=HOST, port=int(PORT), use_reloader=True, debug=True)
|
mit
| 1,309,314,165,383,930,400
| 34.3303
| 79
| 0.579298
| false
| 3.971361
| true
| false
| false
|
openstates/openstates
|
openstates/az/__init__.py
|
1
|
15461
|
import lxml.html
import re
import requests
from openstates.utils import State
from .people import AZPersonScraper
from .bills import AZBillScraper
# from .committees import AZCommitteeScraper
# from .events import AZEventScraper
class Arizona(State):
scrapers = {
"people": AZPersonScraper,
# 'committees': AZCommitteeScraper,
# 'events': AZEventScraper,
"bills": AZBillScraper,
}
legislative_sessions = [
{
"_scraped_name": "2009 - Forty-ninth Legislature - First Regular Session",
"classification": "primary",
"end_date": "2009-07-01",
"identifier": "49th-1st-regular",
"name": "49th Legislature, 1st Regular Session (2009)",
"start_date": "2009-01-12",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - First Special Session",
"classification": "special",
"end_date": "2009-01-31",
"identifier": "49th-1st-special",
"name": "49th Legislature, 1st Special Session (2009)",
"start_date": "2009-01-28",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Second Regular Session",
"classification": "primary",
"end_date": "2010-04-29",
"identifier": "49th-2nd-regular",
"name": "49th Legislature, 2nd Regular Session (2010)",
"start_date": "2010-01-11",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Second Special Session",
"classification": "special",
"end_date": "2009-05-27",
"identifier": "49th-2nd-special",
"name": "49th Legislature, 2nd Special Session (2009)",
"start_date": "2009-05-21",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Third Special Session",
"classification": "special",
"end_date": "2009-08-25",
"identifier": "49th-3rd-special",
"name": "49th Legislature, 3rd Special Session (2009)",
"start_date": "2009-07-06",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Fourth Special Session",
"classification": "special",
"end_date": "2009-11-23",
"identifier": "49th-4th-special",
"name": "49th Legislature, 4th Special Session (2009)",
"start_date": "2009-11-17",
},
{
"_scraped_name": "2009 - Forty-ninth Legislature - Fifth Special Session",
"classification": "special",
"end_date": "2009-12-19",
"identifier": "49th-5th-special",
"name": "49th Legislature, 5th Special Session (2009)",
"start_date": "2009-12-17",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Sixth Special Session",
"classification": "special",
"end_date": "2010-02-11",
"identifier": "49th-6th-special",
"name": "49th Legislature, 6th Special Session (2010)",
"start_date": "2010-02-01",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Seventh Special Session",
"classification": "special",
"end_date": "2010-03-16",
"identifier": "49th-7th-special",
"name": "49th Legislature, 7th Special Session (2010)",
"start_date": "2010-03-08",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Eighth Special Session",
"classification": "special",
"end_date": "2010-04-01",
"identifier": "49th-8th-special",
"name": "49th Legislature, 8th Special Session (2010)",
"start_date": "2010-03-29",
},
{
"_scraped_name": "2010 - Forty-ninth Legislature - Ninth Special Session",
"classification": "special",
"end_date": "2010-08-11",
"identifier": "49th-9th-special",
"name": "49th Legislature, 9th Special Session (2010)",
"start_date": "2010-08-09",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - First Regular Session",
"classification": "primary",
"end_date": "2011-04-20",
"identifier": "50th-1st-regular",
"name": "50th Legislature, 1st Regular Session (2011)",
"start_date": "2011-01-10",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - First Special Session",
"classification": "special",
"end_date": "2011-01-20",
"identifier": "50th-1st-special",
"name": "50th Legislature, 1st Special Session (2011)",
"start_date": "2011-01-19",
},
{
"_scraped_name": "2012 - Fiftieth Legislature - Second Regular Session",
"classification": "primary",
"identifier": "50th-2nd-regular",
"name": "50th Legislature, 2nd Regular Session (2012)",
"start_date": "2012-01-09",
"end_date": "2012-05-03",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Second Special Session",
"classification": "special",
"end_date": "2011-02-16",
"identifier": "50th-2nd-special",
"name": "50th Legislature, 2nd Special Session (2011)",
"start_date": "2011-02-14",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Third Special Session",
"classification": "special",
"end_date": "2011-06-13",
"identifier": "50th-3rd-special",
"name": "50th Legislature, 3rd Special Session (2011)",
"start_date": "2011-06-10",
},
{
"_scraped_name": "2011 - Fiftieth Legislature - Fourth Special Session",
"classification": "special",
"end_date": "2011-11-01",
"identifier": "50th-4th-special",
"name": "50th Legislature, 4th Special Session (2011)",
"start_date": "2011-11-01",
},
{
"_scraped_name": "2013 - Fifty-first Legislature - First Regular Session",
"classification": "primary",
"identifier": "51st-1st-regular",
"name": "51st Legislature - 1st Regular Session (2013)",
"start_date": "2013-01-14",
"end_date": "2013-06-14",
},
{
"_scraped_name": "2013 - Fifty-first Legislature - First Special Session",
"classification": "primary",
"identifier": "51st-1st-special",
"name": "51st Legislature - 1st Special Session (2013)",
"start_date": "2013-06-11",
"end_date": "2013-06-14",
},
{
"_scraped_name": "2014 - Fifty-first Legislature - Second Regular Session",
"classification": "primary",
"identifier": "51st-2nd-regular",
"name": "51st Legislature - 2nd Regular Session",
"start_date": "2014-01-13",
"end_date": "2014-04-24",
},
{
"_scraped_name": "2014 - Fifty-first Legislature - Second Special Session",
"classification": "special",
"identifier": "51st-2nd-special",
"name": "51st Legislature - 2nd Special Session",
"start_date": "2014-05-27",
"end_date": "2014-05-29",
},
{
"_scraped_name": "2015 - Fifty-second Legislature - First Regular Session",
"classification": "primary",
"identifier": "52nd-1st-regular",
"name": "52nd Legislature - 1st Regular Session",
"start_date": "2015-01-12",
"end_date": "2015-04-02",
},
{
"_scraped_name": "2015 - Fifty-second Legislature - First Special Session",
"classification": "special",
"identifier": "52nd-1st-special",
"name": "52nd Legislature - 1st Special Session",
"start_date": "2015-10-28",
"end_date": "2015-10-30",
},
{
"_scraped_name": "2016 - Fifty-second Legislature - Second Regular Session",
"classification": "primary",
"identifier": "52nd-2nd-regular",
"name": "52nd Legislature - 2nd Regular Session",
"start_date": "2016-01-11",
"end_date": "2016-05-07",
},
{
"_scraped_name": "2017 - Fifty-third Legislature - First Regular Session",
"classification": "primary",
"end_date": "2017-05-03",
"identifier": "53rd-1st-regular",
"name": "53rd Legislature - 1st Regular Session",
"start_date": "2017-01-09",
},
{
"_scraped_name": "2018 - Fifty-third Legislature - First Special Session",
"classification": "special",
"identifier": "53rd-1st-special",
"name": "53rd Legislature - 1st Special Session",
"start_date": "2018-01-22",
"end_date": "2018-01-26",
},
{
"_scraped_name": "2018 - Fifty-third Legislature - Second Regular Session",
"classification": "primary",
"identifier": "53rd-2nd-regular",
"name": "53rd Legislature - 2nd Regular Session",
"start_date": "2018-01-08",
"end_date": "2018-05-03",
},
{
"_scraped_name": "2019 - Fifty-fourth Legislature - First Regular Session",
"classification": "primary",
"identifier": "54th-1st-regular",
"name": "54th Legislature - 1st Regular Session",
"start_date": "2019-01-14",
"end_date": "2019-03-29",
},
{
"_scraped_name": "2020 - Fifty-fourth Legislature - Second Regular Session",
"classification": "primary",
"identifier": "54th-2nd-regular",
"name": "54th Legislature - 2nd Regular Session",
"start_date": "2020-01-13",
},
]
ignored_scraped_sessions = [
"2008 - Forty-eighth Legislature - Second Regular Session",
"2007 - Forty-eighth Legislature - First Regular Session",
"2006 - Forty-seventh Legislature - First Special Session",
"2006 - Forty-seventh Legislature - Second Regular Session",
"2005 - Forty-seventh Legislature - First Regular Session",
"2004 - Forty-sixth Legislature - Second Regular Session",
"2003 - Forty-sixth Legislature - Second Special Session",
"2003 - Forty-sixth Legislature - First Special Session",
"2003 - Forty-sixth Legislature - First Regular Session",
"2002 - Forty-fifth Legislature - Sixth Special Session",
"2002 - Forty-fifth Legislature - Fifth Special Session",
"2002 - Forty-fifth Legislature - Fourth Special Session",
"2002 - Forty-fifth Legislature - Third Special Session",
"2002 - Forty-fifth Legislature - Second Regular Session",
"2001 - Forty-fifth Legislature - Second Special Session",
"2001 - Forty-fifth Legislature - First Special Session",
"2001 - Forty-fifth Legislature - First Regular Session",
"2000 - Forty-fourth Legislature - Seventh Special Session",
"2000 - Forty-fourth Legislature - Sixth Special Session",
"2000 - Forty-fourth Legislature - Fifth Special Session",
"2000 - Forty-fourth Legislature - Fourth Special Session",
"2000 - Forty-fourth Legislature - Second Regular Session",
"1999 - Forty-fourth Legislature - Third Special Session",
"1999 - Forty-fourth Legislature - Second Special Session",
"1999 - Forty-fourth Legislature - First Special Session",
"1999 - Forty-fourth Legislature - First Regular Session",
"1998 - Forty-third Legislature - Sixth Special Session",
"1998 - Forty-third Legislature - Fifth Special Session",
"1998 - Forty-third Legislature - Fourth Special Session",
"1998 - Forty-third Legislature - Third Special Session",
"1998 - Forty-third Legislature - Second Regular Session",
"1997 - Forty-third Legislature - Second Special Session",
"1997 - Forty-third Legislature - First Special Session",
"1997 - Forty-third Legislature - First Regular Session",
"1996 - Forty-second Legislature - Seventh Special Session",
"1996 - Forty-second Legislature - Sixth Special Session",
"1996 - Forty-second Legislature - Fifth Special Session",
"1996 - Forty-second Legislature - Second Regular Session",
"1995 - Forty-second Legislature - Fourth Special Session",
"1995 - Forty-second Legislature - Third Special Session",
"1995 - Forty-Second Legislature - Second Special Session",
"1995 - Forty-Second Legislature - First Special Session",
"1995 - Forty-second Legislature - First Regular Session",
"1994 - Forty-first Legislature - Ninth Special Session",
"1994 - Forty-first Legislature - Eighth Special Session",
"1994 - Forty-first Legislature - Second Regular Session",
"1993 - Forty-first Legislature - Seventh Special Session",
"1993 - Forty-first Legislature - Sixth Special Session",
"1993 - Forty-first Legislature - Fifth Special Session",
"1993 - Forty-first Legislature - Fourth Special Session",
"1993 - Forty-first Legislature - Third Special Session",
"1993 - Forty-first Legislature - Second Special Session",
"1993 - Forty-first Legislature - First Special Session",
"1993 - Forty-first Legislature - First Regular Session",
"1992 - Fortieth Legislature - Ninth Special Session",
"1992 - Fortieth Legislature - Eighth Special Session",
"1992 - Fortieth Legislature - Seventh Special Session",
"1992 - Fortieth Legislature - Fifth Special Session",
"1992 - Fortieth Legislature - Sixth Special Session",
"1992 - Fortieth Legislature - Second Regular Session",
"1991 - Fortieth Legislature - Fourth Special Session",
"1991 - Fortieth Legislature - Third Special Session",
"1991 - Fortieth Legislature - Second Special Session",
"1991 - Fortieth Legislature - First Special Session",
"1991 - Fortieth Legislature - First Regular Session",
"1990 - Thirty-ninth Legislature - Fifth Special Session",
"1990 - Thirty-ninth Legislature - Fourth Special Session",
"1990 - Thirty-ninth Legislature - Third Special Session",
"1990 - Thirty-ninth Legislature - Second Regular Session",
"1989 - Thirty-ninth Legislature - Second Special Session",
"1989 - Thirty-ninth Legislature - First Special Session",
"1989 - Thirty-ninth Legislature - First Regular Session",
]
def get_session_list(self):
session = requests.Session()
data = session.get("https://www.azleg.gov/")
# TODO: JSON at https://apps.azleg.gov/api/Session/
doc = lxml.html.fromstring(data.text)
sessions = doc.xpath("//select/option/text()")
sessions = [re.sub(r"\(.+$", "", x).strip() for x in sessions]
return sessions
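    # The TODO above points at a JSON endpoint. The sketch below shows how a
    # JSON-based variant could look; the field names ("SessionName", "Session")
    # are assumptions, not documented behaviour of the endpoint, and this
    # helper method is hypothetical rather than part of the scraper.
    def get_session_list_from_api(self):
        resp = requests.get("https://apps.azleg.gov/api/Session/")
        resp.raise_for_status()
        payload = resp.json()
        # Keep whatever name-like field each record exposes (assumed keys),
        # then strip the trailing parenthetical the same way as above.
        names = [item.get("SessionName") or item.get("Session") for item in payload]
        return [re.sub(r"\(.+$", "", n).strip() for n in names if n]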
|
gpl-3.0
| -4,138,875,049,610,144,300
| 44.878338
| 88
| 0.563935
| false
| 3.661141
| false
| false
| false
|
jcsalterego/pynsour
|
src/bot.py
|
1
|
5843
|
"""Bot class"""
import os
import socket
from parser import Parser
from logger import Logger
from sandbox import Sandbox
import botcode
MAX_CONSOLE_LEN = 50
BUFFER_SIZE = 1024
STATE_DISCONNECTED = 0
STATE_CONNECTING = 1
STATE_HANDSHAKE = 2
STATE_CONNECTED = 3
STATE_ONLINE = 4
class Bot:
def __init__(self):
"""Constructor
"""
self.__state = STATE_DISCONNECTED
self.__load_defaults()
self.parser = Parser()
self.logger = Logger()
self.sandbox = Sandbox()
def __load_defaults(self):
"""Loads default settings
"""
self.username = os.getlogin()
self.password = None
self.nicks = ["nick", "altnick"]
self.realname = "Default pynsour user"
self.handlers = []
self.localhost = 'localhost'
self.on_connect = []
self.ops = []
self.name = ""
def asDict(self):
"""Return object as dictionary
        Skips private attributes and bound methods
"""
info = {}
for attr in dir(self):
if attr[0] == "_" or attr[:2] == "__":
continue
i = getattr(self, attr)
if type(i).__name__ == "instancemethod":
continue
else:
info[attr] = i
return info
def connect(self):
"""Connect the bot to the IRC server
"""
self.__state = STATE_CONNECTING
self.__connection = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.logger.console("+++ Connecting to %s:%s" %
(self.hostname, self.port))
self.__connection.connect((self.hostname, self.port))
def event(self):
"""Event fire
"""
if self.__state == STATE_DISCONNECTED:
return
elif self.__state == STATE_CONNECTING:
if self.password:
self.write("PASS %s" % self.password)
self.write("NICK %s" % self.nicks[0])
self.write("USER %s %s %s :%s" %
(self.username,
self.localhost,
self.hostname,
self.realname))
self.__state = STATE_HANDSHAKE
elif self.__state == STATE_HANDSHAKE:
pass
self.read()
self.ops += self.parser.parse()
self.execute()
def execute(self):
"""Execute botcode
"""
# Expand meta-ops, e.g. connect events
new_ops = []
for operation in self.ops:
if operation[0] == botcode.OP_EVENT_CONNECT:
new_ops += self.on_connect
self.__state = STATE_ONLINE
elif operation[0] == botcode.OP_EVENT_PRIVMSG:
sandbox_ops = self.filter_eval(operation[1])
if sandbox_ops:
new_ops += self.sandbox.execute(sandbox_ops)
else:
new_ops.append(operation)
self.ops = new_ops
while len(self.ops) > 0:
new_ops = []
for operation in self.ops:
if operation[0] == botcode.OP_PONG:
self.write("PONG :%s" % operation[1])
elif operation[0] == botcode.OP_JOIN:
                    # JOIN payload may be a (channel, key) pair or a bare channel
                    if isinstance(operation[1], tuple) and len(operation[1]) == 2:
                        self.write("JOIN %s :%s" % operation[1])
                    else:
                        self.write("JOIN %s" % operation[1])
elif operation[0] == botcode.OP_MODE:
self.write("MODE %s" % operation[1])
elif operation[0] == botcode.OP_PRIVMSG:
self.write("PRIVMSG %s :%s" % operation[1:3])
elif operation[0] == botcode.OP_ERROR:
self.logger.console("ERR\n"
"%s" % operation[1])
self.ops = new_ops
# self.ops will be empty by here
def filter_eval(self, line):
"""Filter based on channel
"""
ops = []
words = line.split(":", 1)
if len(words) == 1:
return ops
args, msg = words
argv = args.split(" ")
if len(argv) < 4:
return ops
sender, action, recipient = argv[:3]
path = "%s/%s" % (self.name, recipient)
for handler in self.handlers:
re = handler['channel_re']
if re.match(path):
# self.logger.console("F: %s %s" % (path, argv))
script_path = re.sub(handler['script'].replace("$", "\\"),
path)
ops += (botcode.OP_EVENT_SCRIPT,
script_path,
(sender, action, recipient, msg)),
return ops
def read(self):
"""Reading from connection
"""
if self.__state > STATE_DISCONNECTED:
incoming = self.__connection.recv(BUFFER_SIZE)
self.parser.append(incoming)
read_bytes = len(incoming)
first_line = incoming.split("\n")[0]
if len(first_line) > MAX_CONSOLE_LEN:
first_line = "%s..." % first_line[:MAX_CONSOLE_LEN]
self.logger.console(" IN [%4d] %s" % (read_bytes,
first_line))
def write(self, outgoing):
"""Writing to connection
"""
first_line = outgoing
outgoing = "".join((outgoing, "\r\n"))
write_bytes = len(outgoing)
if len(first_line) > MAX_CONSOLE_LEN:
first_line = "%s..." % first_line[:MAX_CONSOLE_LEN]
self.logger.console("OUT [%4d] %s" % (write_bytes,
first_line))
self.__connection.send(outgoing)
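# A minimal usage sketch (not part of the original module). The attributes set
# here (name, hostname, port) mirror what connect()/event() read, but the
# server address and the surrounding main loop are assumptions:
#
#     bot = Bot()
#     bot.name = "example"
#     bot.hostname = "irc.example.net"   # placeholder server
#     bot.port = 6667
#     bot.connect()
#     while True:
#         bot.event()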
|
bsd-2-clause
| -995,107,099,016,354,200
| 30.079787
| 74
| 0.475441
| false
| 4.197557
| false
| false
| false
|
plucena24/OpenClos
|
jnpr/openclos/cli_parser.py
|
1
|
16382
|
#------------------------------------------------------------------------------
# cli_parser.py
#------------------------------------------------------------------------------
'''
@author : rgiyer
Date : October 20th, 2014
This module is responsible for parsing the command model defined in
cliCommands.yaml and providing functions for:
    - validating user input
    - invoking the execution handle for CLI commands or macro expansions
    - determining possible argument matches for command auto-completion
      based on context
'''
# Standard Python libraries
import os
import re
import inspect
import subprocess
# Packages required for openclos
import yaml
# openclos classes
import util
# cli related classes
from cli_handle_impl import CLIImplementor
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class CLICommand:
def __init__ ( self, cmd_access, cmd_handle, cmd_macro, cmd_desc ):
self.cmd_access = cmd_access
self.cmd_handle = cmd_handle
self.cmd_macro = cmd_macro
self.cmd_desc = cmd_desc
# end class CLICommand
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class CLIUtil:
def __init__ ( self ):
commandConfFile = os.path.join ( util.configLocation,
'cliCommands.yaml' )
self.yaml_file_stream = open ( commandConfFile, 'r' )
raw_graph = yaml.load ( self.yaml_file_stream )
self.cmd_graph = {}
self.indentation = 8
self.dump_cmd ( raw_graph )
self.yaml_file_stream.close ()
#------------------------------------------------------------------------------
def get_implementor_handle ( self, class_instance, handle_name ):
handles = inspect.getmembers ( class_instance,
predicate = inspect.ismethod )
for function_tuple in handles:
if ( handle_name == function_tuple [ 0 ] ):
return function_tuple [ 1 ]
# no match found
return 0
#------------------------------------------------------------------------------
# Parse through the dictionary iteratively:
def dump_cmd ( self,
cmds,
cmd_root="",
cmd_access="READ",
cmd_handle="",
cmd_macro="",
cmd_desc="" ):
for cmd in cmds:
if ( cmd_root == "" ):
cmd_compound = cmd
else:
cmd_compound = cmd_root + "_" + cmd
cmd_data = cmds [ cmd ]
# Get command access
if cmd_data.has_key ( "Access" ):
cmd_access = cmd_data [ "Access" ]
# Get command handler
if cmd_data.has_key ( "Handle" ):
cmd_handle = cmd_data [ "Handle" ]
elif ( cmd_handle != "" ):
cmd_handle = ""
# Get command macro
if cmd_data.has_key ( "Macro" ):
cmd_macro = cmd_data [ "Macro" ]
elif ( cmd_macro != "" ):
cmd_macro = ""
# Get command description
if cmd_data.has_key ( "Desc" ):
cmd_desc = cmd_data [ "Desc" ]
elif ( cmd_desc != "" ):
cmd_desc = ""
# Parse the arguments
if cmd_data.has_key ( "Args" ):
cmd_args = cmd_data [ "Args" ]
self.dump_cmd ( cmd_args,
cmd_compound,
cmd_access,
cmd_handle,
cmd_macro,
cmd_desc )
if cmd_data.has_key ( "Handle" ):
self.cmd_graph [ cmd_compound ] = CLICommand ( cmd_access,
cmd_handle,
cmd_macro,
cmd_desc )
if ( len ( cmd_compound ) > self.indentation ):
self.indentation = len ( cmd_compound )
#------------------------------------------------------------------------------
def normalize_command ( self, cmd ):
return cmd.replace ( " ", "_" )
#------------------------------------------------------------------------------
def get_indentation ( self, cmd ):
return ( self.indentation + 8 - len ( cmd ) )
#------------------------------------------------------------------------------
def suffix_macro_to_cmd ( self, macro_list, cmd ):
ret_cmd = []
for macro in macro_list:
ret_cmd.append ( self.normalize_command ( cmd + "_" + macro ) )
return ret_cmd
#------------------------------------------------------------------------------
def get_macro_list ( self, class_instance, macro_txt, add_help=None ):
fn_macro = self.get_implementor_handle ( class_instance, macro_txt )
return fn_macro ( add_help )
#------------------------------------------------------------------------------
def include_macro ( self, macro_list, ret_list ):
for item in macro_list:
ret_list.append ( item )
#------------------------------------------------------------------------------
def string_has_enter ( self, string ):
if ( re.search ( "<enter>", string ) != None ):
return 1
else:
return 0
#------------------------------------------------------------------------------
def add_enter_instruction ( self, result_list ):
if ( len ( result_list ) ):
string = result_list [ 0 ]
if ( self.string_has_enter ( string ) == 1 ):
return 0
result_list.insert ( 0, " <enter>" + " " * self.get_indentation ( "<enter" ) + "Execute the current command" )
#------------------------------------------------------------------------------
def match_macro ( self, macro_list, needle, ret_list ):
for haystack in macro_list:
if ( len ( needle ) == len ( haystack ) ):
if ( re.match ( needle, haystack ) != None ):
self.add_enter_instruction ( ret_list )
elif ( len ( needle ) < len ( haystack ) ):
if ( re.match ( needle, haystack ) != None ):
ret_list.append ( haystack )
else:
print ""
#------------------------------------------------------------------------------
def option_exists ( self, consider_option, ret_list ):
for option in ret_list:
if ( re.match ( option, consider_option ) != None ):
return 1
return 0
#------------------------------------------------------------------------------
def complete_command ( self,
part_cmd,
full_cmd,
end_index,
cmd_helper,
ret_list ):
unmatched_string = full_cmd [ end_index: ]
# This is an adjustment for "<space>" before tab / ? keypress
if ( part_cmd [ -1 ] == "_" ):
part_cmd = part_cmd [ 0:-1 ]
unmatched_string = "_" + unmatched_string
if ( unmatched_string [ 0 ] == "_" ):
# attach possible matches
possible_option = unmatched_string.replace ( "_", " " ) + ( " " * self.get_indentation ( full_cmd ) )
possible_option = possible_option + "<" + cmd_helper.cmd_desc + ">"
ret_list.append ( possible_option )
else:
# Get part of the command from part_cmd
match_object = re.search ( "_", part_cmd )
while ( match_object != None ):
part_cmd = part_cmd [ match_object.end (): ]
match_object = re.search ( "_", part_cmd )
# Get rest of the command from unmatched_string
match_object = re.search ( "_", unmatched_string )
if ( match_object != None ):
unmatched_string = unmatched_string [ :(match_object.end()-1)]
complete_word = part_cmd + unmatched_string
if ( self.option_exists ( complete_word, ret_list ) == 0 ):
ret_list.append ( complete_word )
return ret_list
#------------------------------------------------------------------------------
def get_all_cmds ( self ):
ret_list = []
for cmd in self.cmd_graph:
cmd_str = cmd.replace ( "_", " " )
cmd_str = cmd_str + ( " " * self.get_indentation ( cmd ) ) + "<" + self.cmd_graph [ cmd ].cmd_desc + ">"
ret_list.append ( cmd_str )
return ret_list
#------------------------------------------------------------------------------
    # There are many references below to "needle" and "haystack": the needle is
    # the current command context typed at the CLI, and the haystack is the
    # command model dict built during CLIUtil instantiation
#------------------------------------------------------------------------------
def get_match ( self, cmd ):
if ( len ( cmd ) == 0 or re.search ( "[a-z|A-Z|0-9]", cmd ) == None ):
return self.get_all_cmds ()
# chomp input string
if ( cmd [ -1 ] == " " ):
cmd = cmd [ 0:-1 ]
needle = self.normalize_command ( cmd )
ret_list = []
for haystack in self.cmd_graph:
len_haystack = len ( haystack )
len_needle = len ( needle )
cmd_helper = self.cmd_graph [ haystack ]
# Case 1: Full command is provided, without macro expansion
if ( len_needle == len_haystack ):
# check if we have a match
if ( re.match ( needle, haystack ) != None ):
if ( cmd_helper.cmd_macro != "" ):
self.include_macro ( self.get_macro_list ( CLIImplementor (), cmd_helper.cmd_macro, "add help" ), ret_list )
else:
self.add_enter_instruction ( ret_list )
# Case 2: Full command is provided with macro expansion
elif ( len_needle > len_haystack ):
match_object = re.match ( haystack, needle )
if ( match_object != None ):
# Match exists - so get the macro
cmd_macro = needle [ match_object.end (): ]
if ( cmd_macro [ 0 ] == "_" and len ( cmd_macro ) > 1 ):
cmd_macro = cmd_macro [ 1: ]
if ( cmd_helper.cmd_macro != "" ):
cmd_macro_list = self.get_macro_list ( CLIImplementor(),
cmd_helper.cmd_macro )
self.match_macro ( cmd_macro_list, cmd_macro, ret_list )
# Case 3: Part command is provided
elif ( len_needle < len_haystack ):
match_object = re.match ( needle, haystack )
if ( match_object != None ):
# Match exists - get rest of the command
balance_cmd = haystack [ match_object.end (): ]
self.complete_command ( needle,
haystack,
match_object.end (),
self.cmd_graph [ haystack ],
ret_list )
return ret_list
#------------------------------------------------------------------------------
def chomp ( self, token ):
match_object = re.search ( "[a-z|A-Z|0-9]", token )
if ( match_object != None ):
token = token [ ( match_object.end () - 1): ]
token = token [ ::-1 ]
match_object = re.search ( "[a-z|A-Z|0-9]", token )
if ( match_object != None ):
token = token [ ( match_object.end () - 1): ]
token = token [ ::-1 ]
return token
#------------------------------------------------------------------------------
def validate_command_and_execute ( self, full_cmd_context ):
# We will do the validation again in case this function is called
# outside the CLI context
best_cmd_match = ""
best_cmd_args = ""
best_cmd_handle = None
for command in self.cmd_graph:
match_object = re.match ( command,
self.normalize_command ( full_cmd_context ) )
if ( match_object != None ):
# Okay - we found a match. Get macros if included
command_args = ""
# TODO - different impl here for multiple args support
if ( len ( full_cmd_context ) > len ( command ) ):
command_args = self.chomp ( full_cmd_context [ match_object.end (): ] )
if ( len ( best_cmd_match ) < len ( command ) ):
best_cmd_match = command
best_cmd_args = command_args
best_cmd_handle = self.get_implementor_handle ( CLIImplementor (), self.cmd_graph [ command ].cmd_handle )
if ( best_cmd_handle != 0 ):
return best_cmd_handle ( best_cmd_args )
else:
print self.cmd_graph [ best_cmd_match ].cmd_handle + " not implemented"
#------------------------------------------------------------------------------
def print_results ( self, result_list ):
for result in result_list:
print "\t" + result
#------------------------------------------------------------------------------
def print_command_graph ( self, cmd_dict ):
for keys in cmd_dict:
print keys + "=>"
cmd = cmd_dict [ keys ]
if ( cmd.cmd_desc != "" ):
print " " + cmd.cmd_desc
print " " + cmd.cmd_access
if ( cmd.cmd_macro != "" ):
fn_macro = self.get_implementor_handle ( CLIImplementor (),
cmd.cmd_macro )
if ( fn_macro != 0 ):
print fn_macro ()
else:
print " Macro not implemented"
if ( cmd.cmd_handle != "" ):
fn_handle = self.get_implementor_handle ( CLIImplementor (),
cmd.cmd_handle )
if ( fn_handle != 0 ):
fn_handle ()
else:
print " Handler not implemented"
# end class CLIUtil
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
cli_util = CLIUtil ()
match_options = [ "create cabling",
# "create cabling-plan",
# "create cabling-",
# "create cabling",
# "create cabling-plan pod",
# "create cabling-plan pod pod_2",
# "create",
# "create dev",
# "create device-config",
# "create device-config p",
# "create device-config pod",
# "create device-config pod pod_1",
# "run",
# "update password",
# "run r",
# "run RE",
# "create cab",
"create pods",
"create pods from",
"create pods from-file",
"" ]
if __name__ == '__main__':
for match in match_options:
print "Matching results for " + match + " is:"
cli_util.print_results ( cli_util.get_match ( match ) )
print "------------------------------------------------------"
|
apache-2.0
| -8,635,042,734,460,953,000
| 39.751244
| 132
| 0.398608
| false
| 4.878499
| false
| false
| false
|
xapple/plumbing
|
plumbing/scraping/blockers.py
|
1
|
1536
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
# Internal modules #
# First party modules #
from autopaths.file_path import FilePath
################################################################################
def check_blocked_request(tree):
"""
    Check if the request was denied by the server,
    and raise an exception if it was.
"""
# Modules #
from lxml import etree
# Did we get a filepath? #
if isinstance(tree, FilePath):
if tree.count_bytes > 1000000: return
tree = tree.contents
# Did we get a tree or raw text? #
if isinstance(tree, str): tree = etree.HTML(tree)
# By default we are good #
blocked = False
# Try Incapsula #
blocked = blocked or check_incapsula(tree)
# If we were indeed blocked, we can stop here #
if blocked: raise Exception("The request was flagged and blocked by the server.")
################################################################################
def check_incapsula(tree):
# By default we are good #
blocked = False
# Result type 1 from Incapsula #
meta = tree.xpath("//head/meta[@name='ROBOTS']")
if meta and 'NOINDEX' in meta[0].get('content'): blocked = True
# Result type 2 from Incapsula #
meta = tree.xpath("//head/meta[@name='robots']")
if meta and 'noindex' in meta[0].get('content'): blocked = True
# If we were indeed blocked, we can stop here #
return blocked
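################################################################################
# A minimal usage sketch (the URL and surrounding scraping code are
# illustrative, not part of this module):
#
#     import requests
#     html = requests.get("https://example.org/page").text
#     check_blocked_request(html)   # raises if an Incapsula block page came back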
|
mit
| -2,501,178,769,367,362,000
| 30.367347
| 85
| 0.580729
| false
| 3.98961
| false
| false
| false
|
juja256/tasks_manager
|
des/perm.py
|
1
|
1503
|
from bitarray import *
class Permutation:
def __init__(self, l):
# if sorted(l) != range(1, len(l)+1):
# raise ValueError("List is not valid!")
self.__bare = [i - 1 for i in l]
def Get(self):
return self.__bare
def Reverse(self):
rev = [0] * len(self.__bare)
for i in range(0, len(self.__bare)):
rev[self.__bare[i]] = i + 1
return Permutation(rev)
def Substitude(self, msg):
"""
        Substitutes all bits in the input message
"""
bits = bitarray()
if type(msg) == str or type(msg) == bytes:
bits.frombytes(msg)
elif type(msg) == bitarray:
bits = msg
else:
raise ValueError("Not valid type of input data")
res = bitarray(bits.length() * [0])
size = len(self.__bare)
for i in range(0, bits.length()):
res[i] = bits[(i // size) * size + self.__bare[i % size]]
return res
def Reduce(self, block, size):
"""
Shrinks or extends block to specified size with permutation
"""
bits = bitarray()
if type(block) == str or type(block) == bytes:
bits.frombytes(block)
elif type(block) == bitarray:
bits = block
else:
raise ValueError("Not valid type of input data")
res = bitarray(size * [0])
for i in range(0, size):
res[i] = bits[self.__bare[i]]
return res
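# A small usage sketch (illustrative, not part of the original module):
#
#     from bitarray import bitarray
#     p = Permutation([2, 3, 4, 5, 6, 7, 8, 1])   # rotate each 8-bit block left by one
#     bits = bitarray('10110010')
#     shuffled = p.Substitude(bits)               # -> bitarray('01100101')
#     assert p.Reverse().Substitude(shuffled) == bits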
|
gpl-2.0
| 2,248,312,554,323,554,300
| 25.857143
| 69
| 0.503659
| false
| 3.863753
| false
| false
| false
|
lzkelley/sne
|
scripts/import.py
|
1
|
293386
|
#!/usr/local/bin/python3.5
import csv
import os
import re
import urllib
import requests
import calendar
import sys
import json
import codecs
import resource
import argparse
import gzip
import io
import shutil
import statistics
import warnings
from datetime import timedelta, datetime
from glob import glob
from hashlib import md5
from html import unescape
from cdecimal import Decimal
from astroquery.vizier import Vizier
from astroquery.simbad import Simbad
from astroquery.irsa_dust import IrsaDust
from copy import deepcopy
from astropy import constants as const
from astropy import units as un
from astropy.io import fits
from astropy.time import Time as astrotime
from astropy.cosmology import Planck15 as cosmo, z_at_value
from collections import OrderedDict, Sequence
from math import log10, floor, sqrt, isnan, ceil
from bs4 import BeautifulSoup, Tag, NavigableString
from string import ascii_letters
from photometry import *
from tq import *
from digits import *
from repos import *
from events import *
parser = argparse.ArgumentParser(description='Generate a catalog JSON file and plot HTML files from SNE data.')
parser.add_argument('--update', '-u', dest='update', help='Only update catalog using live sources.', default=False, action='store_true')
parser.add_argument('--verbose', '-v', dest='verbose', help='Print more messages to the screen.', default=False, action='store_true')
parser.add_argument('--refresh', '-r', dest='refresh', help='Ignore most task caches.', default=False, action='store_true')
parser.add_argument('--full-refresh', '-f', dest='fullrefresh', help='Ignore all task caches.', default=False, action='store_true')
parser.add_argument('--archived', '-a', dest='archived', help='Always use task caches.', default=False, action='store_true')
parser.add_argument('--travis', '-tr', dest='travis', help='Run import script in test mode for Travis.', default=False, action='store_true')
parser.add_argument('--refreshlist', '-rl', dest='refreshlist', help='Comma-delimited list of caches to clear.', default='')
args = parser.parse_args()
tasks = OrderedDict([
("deleteoldevents", {"nicename":"Deleting old events", "update": False}),
("internal", {"nicename":"%pre metadata and photometry", "update": False}),
("radio", {"nicename":"%pre radio data", "update": False}),
("xray", {"nicename":"%pre X-ray data", "update": False}),
("simbad", {"nicename":"%pre SIMBAD", "update": False}),
("vizier", {"nicename":"%pre VizieR", "update": False}),
("donations", {"nicename":"%pre donations", "update": False}),
("pessto-dr1", {"nicename":"%pre PESSTO DR1", "update": False}),
("scp", {"nicename":"%pre SCP", "update": False}),
("ascii", {"nicename":"%pre ASCII", "update": False}),
("cccp", {"nicename":"%pre CCCP", "update": False, "archived": True}),
("suspect", {"nicename":"%pre SUSPECT", "update": False}),
("cfa", {"nicename":"%pre CfA archive photometry", "update": False}),
("ucb", {"nicename":"%pre UCB photometry", "update": False, "archived": True}),
("sdss", {"nicename":"%pre SDSS photometry", "update": False}),
("csp", {"nicename":"%pre CSP photometry", "update": False}),
("itep", {"nicename":"%pre ITEP", "update": False}),
("asiago", {"nicename":"%pre Asiago metadata", "update": False}),
("tns", {"nicename":"%pre TNS metadata", "update": True, "archived": True}),
("rochester", {"nicename":"%pre Latest Supernovae", "update": True, "archived": False}),
("lennarz", {"nicename":"%pre Lennarz", "update": False}),
("fermi", {"nicename":"%pre Fermi", "update": False}),
("gaia", {"nicename":"%pre GAIA", "update": True, "archived": False}),
("ogle", {"nicename":"%pre OGLE", "update": True, "archived": False}),
("snls", {"nicename":"%pre SNLS", "update": False}),
("psthreepi", {"nicename":"%pre Pan-STARRS 3π", "update": True, "archived": False}),
("psmds", {"nicename":"%pre Pan-STARRS MDS", "update": False}),
("crts", {"nicename":"%pre CRTS", "update": True, "archived": False}),
("snhunt", {"nicename":"%pre SNhunt", "update": True, "archived": False}),
("nedd", {"nicename":"%pre NED-D", "update": False}),
("cpcs", {"nicename":"%pre CPCS", "update": True, "archived": False}),
("ptf", {"nicename":"%pre PTF", "update": False, "archived": False}),
("des", {"nicename":"%pre DES", "update": False, "archived": False}),
("asassn", {"nicename":"%pre ASASSN", "update": True }),
#("asiagospectra", {"nicename":"%pre Asiago spectra", "update": True }),
#("wiserepspectra", {"nicename":"%pre WISeREP spectra", "update": False}),
#("cfaspectra", {"nicename":"%pre CfA archive spectra", "update": False}),
#("snlsspectra", {"nicename":"%pre SNLS spectra", "update": False}),
#("cspspectra", {"nicename":"%pre CSP spectra", "update": False}),
#("ucbspectra", {"nicename":"%pre UCB spectra", "update": True, "archived": True}),
#("suspectspectra", {"nicename":"%pre SUSPECT spectra", "update": False}),
#("snfspectra", {"nicename":"%pre SNH spectra", "update": False}),
#("superfitspectra", {"nicename":"%pre Superfit spectra", "update": False}),
#("mergeduplicates", {"nicename":"Merging duplicates", "update": False}),
#("setprefnames", {"nicename":"Setting preferred names", "update": False}),
("writeevents", {"nicename":"Writing events", "update": True })
])
oscbibcode = '2016arXiv160501054G'
oscname = 'The Open Supernova Catalog'
oscurl = 'https://sne.space'
cfaack = ("This research has made use of the CfA Supernova Archive, "
"which is funded in part by the National Science Foundation "
"through grant AST 0907903.")
clight = const.c.cgs.value
km = (1.0 * un.km).cgs.value
planckh = const.h.cgs.value
keV = (1.0 * un.keV).cgs.value
travislimit = 10
currenttask = ''
eventnames = []
events = OrderedDict()
warnings.filterwarnings('ignore', r'Warning: converting a masked element to nan.')
with open('type-synonyms.json', 'r') as f:
typereps = json.loads(f.read(), object_pairs_hook=OrderedDict)
with open('source-synonyms.json', 'r') as f:
sourcereps = json.loads(f.read(), object_pairs_hook=OrderedDict)
with open('non-sne-types.json', 'r') as f:
nonsnetypes = json.loads(f.read(), object_pairs_hook=OrderedDict)
nonsnetypes = [x.upper() for x in nonsnetypes]
repbetterquantity = {
'redshift',
'ebv',
'velocity',
'lumdist',
'discoverdate',
'maxdate'
}
maxbands = [
['B', 'b', 'g'], # B-like bands first
['V', 'G'], # if not, V-like bands
['R', 'r'] # if not, R-like bands
]
def uniq_cdl(values):
return ','.join(list(OrderedDict.fromkeys(values).keys()))
def event_attr_priority(attr):
if attr == 'photometry':
return 'zzzzzzzy'
if attr == 'spectra':
return 'zzzzzzzz'
if attr == 'name':
return 'aaaaaaaa'
if attr == 'sources':
return 'aaaaaaab'
if attr == 'alias':
return 'aaaaaaac'
return attr
prefkinds = ['heliocentric', 'cmb', 'spectroscopic', 'photometric', 'host', 'cluster', '']
def frame_priority(attr):
if 'kind' in attr:
if attr['kind'] in prefkinds:
return prefkinds.index(attr['kind'])
else:
return len(prefkinds)
return len(prefkinds)
def alias_priority(name, attr):
if name == attr:
return 0
return 1
def ct_priority(name, attr):
aliases = attr['source'].split(',')
max_source_year = -10000
vaguetypes = ['CC', 'I']
if attr['value'] in vaguetypes:
return -max_source_year
for alias in aliases:
if alias == 'D':
continue
source = get_source_by_alias(name, alias)
if 'bibcode' in source:
source_year = get_source_year(source)
if source_year > max_source_year:
max_source_year = source_year
return -max_source_year
def get_source_year(source):
if 'bibcode' in source:
if is_number(source['bibcode'][:4]):
return int(source['bibcode'][:4])
else:
return -10000
raise(ValueError('No bibcode available for source!'))
def name_clean(name):
newname = name.strip(' ;,*')
if newname.startswith('MASJ'):
newname = newname.replace('MASJ', 'MASTER OT J', 1)
if newname.startswith('MASTER') and is_number(newname[7]):
newname = newname.replace('MASTER', 'MASTER OT J', 1)
if newname.startswith('MASTER OT J '):
newname = newname.replace('MASTER OT J ', 'MASTER OT J', 1)
if newname.startswith('Psn'):
newname = newname.replace('Psn', 'PSN', 1)
if newname.startswith('PSNJ'):
newname = newname.replace('PSNJ', 'PSN J', 1)
if newname.startswith('TCPJ'):
newname = newname.replace('TCPJ', 'TCP J', 1)
if newname.startswith('SMTJ'):
newname = newname.replace('SMTJ', 'SMT J', 1)
if newname.startswith('PSN20J'):
newname = newname.replace('PSN20J', 'PSN J', 1)
if newname.startswith('ASASSN') and newname[6] != '-':
newname = newname.replace('ASASSN', 'ASASSN-', 1)
if newname.startswith('ROTSE3J'):
newname = newname.replace('ROTSE3J', 'ROTSE3 J', 1)
if newname.startswith('SNHunt'):
newname = newname.replace('SNHunt', 'SNhunt', 1)
if newname.startswith('ptf'):
newname = newname.replace('ptf', 'PTF', 1)
if newname.startswith('PTF '):
newname = newname.replace('PTF ', 'PTF', 1)
if newname.startswith('iPTF '):
newname = newname.replace('iPTF ', 'iPTF', 1)
if newname.startswith('SNHunt'):
newname = newname.replace('SNHunt', 'SNhunt', 1)
if newname.startswith('PESSTOESO'):
newname = newname.replace('PESSTOESO', 'PESSTO ESO ', 1)
if newname.startswith('snf'):
newname = newname.replace('snf', 'SNF', 1)
if newname.startswith('SNF') and is_number(newname[3:]) and len(newname) >= 12:
newname = 'SNF' + newname[3:11] + '-' + newname[11:]
if newname.startswith(('MASTER OT J', 'ROTSE3 J')):
prefix = newname.split('J')[0]
coords = newname.split('J')[-1].strip()
decsign = '+' if '+' in coords else '-'
coordsplit = coords.replace('+','-').split('-')
if '.' not in coordsplit[0] and len(coordsplit[0]) > 6 and '.' not in coordsplit[1] and len(coordsplit[1]) > 6:
newname = (prefix + 'J' + coordsplit[0][:6] + '.' + coordsplit[0][6:] +
decsign + coordsplit[1][:6] + '.' + coordsplit[1][6:])
if newname.startswith('Gaia ') and is_number(newname[3:4]) and len(newname) > 5:
newname = newname.replace('Gaia ', 'Gaia', 1)
if len(newname) <= 4 and is_number(newname):
newname = 'SN' + newname + 'A'
if len(newname) > 4 and is_number(newname[:4]) and not is_number(newname[4:]):
newname = 'SN' + newname
if newname.startswith('sn') and is_number(newname[2:6]) and len(newname) > 6:
newname = newname.replace('sn', 'SN', 1)
if newname.startswith('SN ') and is_number(newname[3:7]) and len(newname) > 7:
newname = newname.replace('SN ', 'SN', 1)
if newname.startswith('SN') and is_number(newname[2:6]) and len(newname) == 7 and newname[6].islower():
newname = 'SN' + newname[2:6] + newname[6].upper()
elif (newname.startswith('SN') and is_number(newname[2:6]) and
(len(newname) == 8 or len(newname) == 9) and newname[6:].isupper()):
newname = 'SN' + newname[2:6] + newname[6:].lower()
newname = (' '.join(newname.split())).strip()
return newname
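# A few representative name_clean() results, derived from the rules above and
# shown only as documentation (the input names are illustrative):
#   'sn2005gj'        -> 'SN2005gj'
#   'Psn J0102+3044'  -> 'PSN J0102+3044'
#   '1987'            -> 'SN1987A'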
def get_aliases(name, includename = True):
if 'alias' in events[name]:
aliases = [x['value'] for x in events[name]['alias']]
if includename and name not in aliases:
return [name] + aliases
return aliases
if includename:
return [name]
return []
def add_event(name, load = True, delete = True, source = '', loadifempty = True):
if loadifempty and args.update and not len(events):
load_stubs()
newname = name_clean(name)
if newname not in events or 'stub' in events[newname]:
match = ''
if newname not in events:
for event in events:
aliases = get_aliases(event)
if (len(aliases) > 1 and newname in aliases and
('distinctfrom' not in events[event] or newname not in events[event]['distinctfrom'])):
match = event
break
if match:
newname = match
if load:
loadedname = load_event_from_file(name = newname, delete = delete)
if loadedname:
if 'stub' in events[loadedname]:
raise(ValueError('Failed to find event file for stubbed event'))
return loadedname
if match:
return match
events[newname] = OrderedDict()
events[newname]['name'] = newname
if source:
add_quantity(newname, 'alias', newname, source)
if args.verbose and 'stub' not in events[newname]:
tprint('Added new event ' + newname)
return newname
else:
return newname
def event_exists(name):
if name in events:
return True
for ev in events:
if name in get_aliases(ev):
return True
return False
def get_preferred_name(name):
if name not in events:
matches = []
for event in events:
aliases = get_aliases(event)
if len(aliases) > 1 and name in aliases:
return event
return name
else:
return name
def snname(string):
newstring = string.replace(' ', '').upper()
if (newstring[:2] == "SN"):
head = newstring[:6]
tail = newstring[6:]
if len(tail) >= 2 and tail[1] != '?':
tail = tail.lower()
newstring = head + tail
return newstring
def add_source(name, refname = '', reference = '', url = '', bibcode = '', secondary = '', acknowledgment = ''):
nsources = len(events[name]['sources']) if 'sources' in events[name] else 0
if not refname:
if not bibcode:
raise(ValueError('Bibcode must be specified if name is not.'))
if bibcode and len(bibcode) != 19:
raise(ValueError('Bibcode "' + bibcode + '" must be exactly 19 characters long'))
refname = bibcode
if refname.upper().startswith('ATEL') and not bibcode:
refname = refname.replace('ATEL', 'ATel').replace('Atel', 'ATel').replace('ATel #', 'ATel ').replace('ATel#', 'ATel').replace('ATel', 'ATel ')
refname = ' '.join(refname.split())
atelnum = refname.split()[-1]
if is_number(atelnum) and atelnum in atelsdict:
bibcode = atelsdict[atelnum]
if refname.upper().startswith('CBET') and not bibcode:
refname = refname.replace('CBET', 'CBET ')
refname = ' '.join(refname.split())
cbetnum = refname.split()[-1]
if is_number(cbetnum) and cbetnum in cbetsdict:
bibcode = cbetsdict[cbetnum]
if refname.upper().startswith('IAUC') and not bibcode:
refname = refname.replace('IAUC', 'IAUC ')
refname = ' '.join(refname.split())
iaucnum = refname.split()[-1]
if is_number(iaucnum) and iaucnum in iaucsdict:
bibcode = iaucsdict[iaucnum]
for rep in sourcereps:
if refname in sourcereps[rep]:
refname = rep
break
if 'sources' not in events[name] or (refname not in [x['name'] for x in events[name]['sources']] and
(not bibcode or bibcode not in [x['bibcode'] if 'bibcode' in x else '' for x in events[name]['sources']])):
source = str(nsources + 1)
newsource = OrderedDict()
newsource['name'] = refname
if url:
newsource['url'] = url
if reference:
newsource['reference'] = reference
if bibcode:
newsource['bibcode'] = bibcode
if acknowledgment:
newsource['acknowledgment'] = acknowledgment
newsource['alias'] = source
if secondary:
newsource['secondary'] = True
events[name].setdefault('sources',[]).append(newsource)
else:
if refname in [x['name'] for x in events[name]['sources']]:
source = [x['alias'] for x in events[name]['sources']][
[x['name'] for x in events[name]['sources']].index(refname)]
elif bibcode and bibcode in [x['bibcode'] if 'bibcode' in x else '' for x in events[name]['sources']]:
source = [x['alias'] for x in events[name]['sources']][
[x['bibcode'] if 'bibcode' in x else '' for x in events[name]['sources']].index(bibcode)]
else:
raise(ValueError("Couldn't find source that should exist!"))
return source
def get_source_by_alias(name, alias):
for source in events[name]['sources']:
if source['alias'] == alias:
return source
raise(ValueError('Source alias not found!'))
def same_tag_str(photo, val, tag):
issame = ((tag not in photo and not val) or (tag in photo and not val) or (tag in photo and photo[tag] == val))
return issame
def same_tag_num(photo, val, tag, canbelist = False):
issame = ((tag not in photo and not val) or (tag in photo and not val) or (tag in photo and
((not canbelist and Decimal(photo[tag]) == Decimal(val)) or
(canbelist and
((isinstance(photo[tag], str) and isinstance(val, str) and Decimal(photo[tag]) == Decimal(val)) or
(isinstance(photo[tag], list) and isinstance(val, list) and photo[tag] == val))))))
return issame
def add_photometry(name, time = "", u_time = "MJD", e_time = "", telescope = "", instrument = "", band = "",
magnitude = "", e_magnitude = "", source = "", upperlimit = False, system = "",
observatory = "", observer = "", host = False, includeshost = False, survey = "",
flux = "", fluxdensity = "", e_flux = "", e_fluxdensity = "", u_flux = "", u_fluxdensity = "", frequency = "",
u_frequency = "", counts = "", e_counts = "", nhmw = "", photonindex = "", unabsorbedflux = "",
e_unabsorbedflux = "", energy = "", u_energy = "", e_lower_magnitude = "", e_upper_magnitude = ""):
if (not time and not host) or (not magnitude and not flux and not fluxdensity and not counts and not unabsorbedflux):
warnings.warn('Time or brightness not specified when adding photometry, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Band: "' + band + '", AB magnitude: "' + magnitude + '"')
return
if (not host and not is_number(time)) or (not is_number(magnitude) and not is_number(flux) and not is_number(fluxdensity) and not is_number(counts)):
warnings.warn('Time or brightness not numerical, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Band: "' + band + '", AB magnitude: "' + magnitude + '"')
return
if ((e_magnitude and not is_number(e_magnitude)) or (e_flux and not is_number(e_flux)) or
(e_fluxdensity and not is_number(e_fluxdensity)) or (e_counts and not is_number(e_counts))):
warnings.warn('Brightness error not numerical, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Band: "' + band + '", AB error: "' + e_magnitude + '"')
return
if e_time and not is_number(e_time):
warnings.warn('Time error not numerical, not adding.')
tprint('Name : "' + name + '", Time: "' + time + '", Time error: "' + e_time + '"')
return
if (flux or fluxdensity) and ((not u_flux and not u_fluxdensity) or (not frequency and not band and not energy)):
warnings.warn('Unit and band/frequency must be set when adding photometry by flux or flux density, not adding.')
tprint('Name : "' + name + '", Time: "' + time)
return
if not source:
        raise(ValueError('Photometry must have source before being added!'))
if is_erroneous(name, 'photometry', source):
return
# Do some basic homogenization
sband = bandrepf(band)
sinstrument = instrument
ssystem = system
stelescope = telescope
if not sinstrument:
sinstrument = bandmetaf(sband, 'instrument')
if not stelescope:
stelescope = bandmetaf(sband, 'telescope')
if not ssystem:
ssystem = bandmetaf(sband, 'system')
# Look for duplicate data and don't add if duplicate
if 'photometry' in events[name]:
for photo in events[name]['photometry']:
if (same_tag_str(photo, sband, 'band') and
same_tag_str(photo, u_time, 'u_time') and
same_tag_num(photo, time, 'time', canbelist = True) and
same_tag_num(photo, magnitude, 'magnitude') and
(('host' not in photo and not host) or ('host' in photo and host)) and
same_tag_num(photo, flux, 'flux') and
same_tag_num(photo, unabsorbedflux, 'unabsorbedflux') and
same_tag_num(photo, fluxdensity, 'fluxdensity') and
same_tag_num(photo, counts, 'counts') and
same_tag_num(photo, energy, 'energy') and
same_tag_num(photo, frequency, 'frequency') and
same_tag_num(photo, photonindex, 'photonindex') and
same_tag_num(photo, e_magnitude, 'e_magnitude') and
same_tag_num(photo, e_lower_magnitude, 'e_lower_magnitude') and
same_tag_num(photo, e_upper_magnitude, 'e_upper_magnitude') and
same_tag_num(photo, e_flux, 'e_flux') and
same_tag_num(photo, e_unabsorbedflux, 'e_unabsorbedflux') and
same_tag_num(photo, e_fluxdensity, 'e_fluxdensity') and
same_tag_num(photo, e_counts, 'e_counts') and
same_tag_num(photo, u_flux, 'u_flux') and
same_tag_num(photo, u_fluxdensity, 'u_fluxdensity') and
same_tag_num(photo, u_frequency, 'u_frequency') and
same_tag_num(photo, u_energy, 'u_energy') and
same_tag_str(photo, ssystem, 'system')
):
return
photoentry = OrderedDict()
if time:
photoentry['time'] = time if isinstance(time, list) or isinstance(time, str) else str(time)
if e_time:
photoentry['e_time'] = str(e_time)
if u_time:
photoentry['u_time'] = u_time
if sband:
photoentry['band'] = sband
if ssystem:
photoentry['system'] = ssystem
if magnitude:
photoentry['magnitude'] = str(magnitude)
if e_magnitude:
photoentry['e_magnitude'] = str(e_magnitude)
if e_lower_magnitude:
photoentry['e_lower_magnitude'] = str(e_lower_magnitude)
if e_upper_magnitude:
photoentry['e_upper_magnitude'] = str(e_upper_magnitude)
if frequency:
photoentry['frequency'] = frequency if isinstance(frequency, list) or isinstance(frequency, str) else str(frequency)
if u_frequency:
photoentry['u_frequency'] = u_frequency
if energy:
photoentry['energy'] = energy if isinstance(energy, list) or isinstance(energy, str) else str(energy)
if u_energy:
photoentry['u_energy'] = u_energy
if flux:
photoentry['flux'] = str(flux)
if e_flux:
photoentry['e_flux'] = str(e_flux)
if unabsorbedflux:
photoentry['unabsorbedflux'] = str(unabsorbedflux)
if e_unabsorbedflux:
photoentry['e_unabsorbedflux'] = str(e_unabsorbedflux)
if u_flux:
photoentry['u_flux'] = str(u_flux)
if photonindex:
photoentry['photonindex'] = str(photonindex)
if fluxdensity:
photoentry['fluxdensity'] = str(fluxdensity)
if e_fluxdensity:
photoentry['e_fluxdensity'] = str(e_fluxdensity)
if u_fluxdensity:
photoentry['u_fluxdensity'] = str(u_fluxdensity)
if counts:
photoentry['counts'] = str(counts)
if e_counts:
photoentry['e_counts'] = str(e_counts)
if upperlimit:
photoentry['upperlimit'] = upperlimit
if host:
photoentry['host'] = host
if includeshost:
photoentry['includeshost'] = includeshost
if observer:
photoentry['observer'] = observer
if survey:
photoentry['survey'] = survey
if observatory:
photoentry['observatory'] = observatory
if stelescope:
photoentry['telescope'] = stelescope
if sinstrument:
photoentry['instrument'] = sinstrument
if nhmw:
photoentry['nhmw'] = nhmw
if source:
photoentry['source'] = source
events[name].setdefault('photometry',[]).append(photoentry)
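# Typical call pattern used throughout this script (the time/band/magnitude
# values below are illustrative only):
#   source = add_source(name, bibcode = oscbibcode)
#   add_photometry(name, time = '55555.0', band = 'B', magnitude = '17.20',
#                  e_magnitude = '0.05', source = source)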
def trim_str_arr(arr, length = 10):
return [str(round_sig(float(x), length)) if (len(x) > length and len(str(round_sig(float(x), length))) < len(x)) else x for x in arr]
def add_spectrum(name, waveunit, fluxunit, wavelengths = "", fluxes = "", u_time = "", time = "", instrument = "",
deredshifted = "", dereddened = "", errorunit = "", errors = "", source = "", snr = "", telescope = "",
observer = "", reducer = "", filename = "", observatory = "", data = ""):
if is_erroneous(name, 'spectra', source):
return
spectrumentry = OrderedDict()
if 'spectra' in events[name]:
for si, spectrum in enumerate(events[name]['spectra']):
if 'filename' in spectrum and spectrum['filename'] == filename:
# Copy exclude info
if 'exclude' in spectrum:
spectrumentry['exclude'] = spectrum['exclude']
# Don't add duplicate spectra
if 'data' in spectrum:
return
del(events[name]['spectra'][si])
break
if not waveunit:
        warnings.warn('No wavelength unit specified, not adding spectrum.')
return
if not fluxunit:
warnings.warn('No flux unit specified, not adding spectrum.')
return
    if not data and (not wavelengths or not fluxes):
        raise(ValueError('Spectrum must have wavelengths and fluxes set, or data set.'))
    if not source:
        raise(ValueError('Spectrum must have source before being added!'))
if deredshifted != '':
spectrumentry['deredshifted'] = deredshifted
if dereddened != '':
spectrumentry['dereddened'] = dereddened
if instrument:
spectrumentry['instrument'] = instrument
if telescope:
spectrumentry['telescope'] = telescope
if observatory:
spectrumentry['observatory'] = observatory
if u_time:
spectrumentry['u_time'] = u_time
if time:
spectrumentry['time'] = time
if snr:
spectrumentry['snr'] = snr
if observer:
spectrumentry['observer'] = observer
if reducer:
spectrumentry['reducer'] = reducer
if filename:
spectrumentry['filename'] = filename
spectrumentry['waveunit'] = waveunit
spectrumentry['fluxunit'] = fluxunit
if data:
spectrumentry['data'] = data
else:
if errors and max([float(x) for x in errors]) > 0.:
if not errorunit:
warnings.warn('No error unit specified, not adding spectrum.')
return
spectrumentry['errorunit'] = errorunit
data = [trim_str_arr(wavelengths), trim_str_arr(fluxes), trim_str_arr(errors)]
else:
data = [trim_str_arr(wavelengths), trim_str_arr(fluxes)]
spectrumentry['data'] = [list(i) for i in zip(*data)]
if source:
spectrumentry['source'] = source
events[name].setdefault('spectra',[]).append(spectrumentry)
def is_erroneous(name, field, sources):
if 'errors' in events[name]:
for alias in sources.split(','):
source = get_source_by_alias(name, alias)
if ('bibcode' in source and source['bibcode'] in
[x['value'] for x in events[name]['errors'] if x['kind'] == 'bibcode' and x['extra'] == field]):
return True
if ('name' in source and source['name'] in
[x['value'] for x in events[name]['errors'] if x['kind'] == 'name' and x['extra'] == field]):
return True
return False
def add_quantity(name, quantity, value, sources, forcereplacebetter = False,
lowerlimit = '', upperlimit = '', error = '', unit = '', kind = '', extra = ''):
if not quantity:
raise(ValueError('Quantity must be specified for add_quantity.'))
if not sources:
raise(ValueError('Source must be specified for quantity before it is added.'))
if not isinstance(value, str) and (not isinstance(value, list) or not isinstance(value[0], str)):
raise(ValueError('Quantity must be a string or an array of strings.'))
if is_erroneous(name, quantity, sources):
return
svalue = value.strip()
serror = error.strip()
skind = kind.strip()
sunit = ''
if not svalue or svalue == '--' or svalue == '-':
return
if serror and (not is_number(serror) or float(serror) < 0.):
raise(ValueError('Quanta error value must be a number and positive.'))
#Set default units
if not unit and quantity == 'velocity':
unit = 'km/s'
if not unit and quantity == 'ra':
unit = 'hours'
if not unit and quantity == 'dec':
unit = 'degrees'
if not unit and quantity in ['lumdist', 'comovingdist']:
unit = 'Mpc'
#Handle certain quantity
if quantity == 'alias':
svalue = name_clean(svalue)
if 'distinctfrom' in events[name]:
if svalue in [x['value'] for x in events[name]['distinctfrom']]:
return
if quantity in ['velocity', 'redshift', 'ebv', 'lumdist', 'comovingdist']:
if not is_number(svalue):
return
if quantity == 'host':
if is_number(svalue):
return
if svalue.lower() in ['anonymous', 'anon.', 'anon', 'intergalactic']:
return
if svalue.startswith('M ') and is_number(svalue[2:]):
            svalue = svalue.replace('M ', 'M', 1)
svalue = svalue.strip("()").replace(' ', ' ', 1)
svalue = svalue.replace("Abell", "Abell ", 1)
svalue = svalue.replace("APMUKS(BJ)", "APMUKS(BJ) ", 1)
svalue = svalue.replace("ARP", "ARP ", 1)
svalue = svalue.replace("CGCG", "CGCG ", 1)
svalue = svalue.replace("HOLM", "HOLM ", 1)
svalue = svalue.replace("IC", "IC ", 1)
svalue = svalue.replace("Intergal.", "Intergalactic", 1)
svalue = svalue.replace("MCG+", "MCG +", 1)
svalue = svalue.replace("MCG-", "MCG -", 1)
svalue = svalue.replace("M+", "MCG +", 1)
svalue = svalue.replace("M-", "MCG -", 1)
svalue = svalue.replace("MGC ", "MCG ", 1)
svalue = svalue.replace("Mrk", "MRK", 1)
svalue = svalue.replace("MRK", "MRK ", 1)
svalue = svalue.replace("NGC", "NGC ", 1)
svalue = svalue.replace("PGC", "PGC ", 1)
svalue = svalue.replace("SDSS", "SDSS ", 1)
svalue = svalue.replace("UGC", "UGC ", 1)
if len(svalue) > 4 and svalue.startswith("PGC "):
svalue = svalue[:4] + svalue[4:].lstrip(" 0")
if len(svalue) > 4 and svalue.startswith("UGC "):
svalue = svalue[:4] + svalue[4:].lstrip(" 0")
if len(svalue) > 5 and svalue.startswith(("MCG +", "MCG -")):
svalue = svalue[:5] + '-'.join([x.zfill(2) for x in svalue[5:].strip().split("-")])
if len(svalue) > 5 and svalue.startswith("CGCG "):
svalue = svalue[:5] + '-'.join([x.zfill(3) for x in svalue[5:].strip().split("-")])
if (len(svalue) > 1 and svalue.startswith("E")) or (len(svalue) > 3 and svalue.startswith('ESO')):
if svalue[0] == "E":
esplit = svalue[1:].split("-")
else:
esplit = svalue[3:].split("-")
if len(esplit) == 2 and is_number(esplit[0].strip()):
if esplit[1].strip()[0] == 'G':
parttwo = esplit[1][1:].strip()
else:
parttwo = esplit[1].strip()
if is_number(parttwo.strip()):
svalue = 'ESO ' + esplit[0].lstrip('0') + '-G' + parttwo.lstrip('0')
svalue = ' '.join(svalue.split())
if (not skind and ((svalue.lower().startswith('abell') and is_number(svalue[5:].strip())) or
'cluster' in svalue.lower())):
skind = 'cluster'
elif quantity == 'claimedtype':
isq = False
svalue = svalue.replace('young', '')
if '?' in svalue:
isq = True
svalue = svalue.strip(' ?')
for rep in typereps:
if svalue in typereps[rep]:
svalue = rep
break
if isq:
svalue = svalue + '?'
elif quantity in ['ra', 'dec', 'hostra', 'hostdec']:
if unit == 'floatdegrees':
deg = float('%g' % Decimal(svalue))
sig = get_sig_digits(svalue)
if 'ra' in quantity:
flhours = deg / 360.0 * 24.0
hours = floor(flhours)
minutes = floor((flhours - hours) * 60.0)
seconds = (flhours * 60.0 - (hours * 60.0 + minutes)) * 60.0
if seconds > 60.0:
raise(ValueError('Invalid seconds value for ' + quantity))
svalue = str(hours).zfill(2) + ':' + str(minutes).zfill(2) + ':' + zpad(pretty_num(seconds, sig = sig - 1))
elif 'dec' in quantity:
fldeg = abs(deg)
degree = floor(fldeg)
minutes = floor((fldeg - degree) * 60.0)
seconds = (fldeg * 60.0 - (degree * 60.0 + minutes)) * 60.0
if seconds > 60.0:
raise(ValueError('Invalid seconds value for ' + quantity))
svalue = (('+' if deg >= 0.0 else '-') + str(degree).strip('+-').zfill(2) + ':' +
str(minutes).zfill(2) + ':' + zpad(pretty_num(seconds, sig = sig - 1)))
elif unit == 'nospace' and 'ra' in quantity:
svalue = svalue[:2] + ':' + svalue[2:4] + ((':' + zpad(svalue[4:])) if len(svalue) > 4 else '')
elif unit == 'nospace' and 'dec' in quantity:
if svalue.startswith(('+', '-')):
svalue = svalue[:3] + ':' + svalue[3:5] + ((':' + zpad(svalue[5:])) if len(svalue) > 5 else '')
else:
svalue = '+' + svalue[:2] + ':' + svalue[2:4] + ((':' + zpad(svalue[4:])) if len(svalue) > 4 else '')
else:
svalue = svalue.replace(' ', ':')
if 'dec' in quantity:
valuesplit = svalue.split(':')
svalue = (('-' if valuesplit[0].startswith('-') else '+') + valuesplit[0].strip('+-').zfill(2) +
(':' + valuesplit[1].zfill(2) if len(valuesplit) > 1 else '') +
(':' + zpad(valuesplit[2]) if len(valuesplit) > 2 else ''))
if 'ra' in quantity:
sunit = 'hours'
elif 'dec' in quantity:
sunit = 'degrees'
# Correct case of arcseconds = 60.0.
valuesplit = svalue.split(':')
if len(valuesplit) == 3 and valuesplit[-1] in ["60.0", "60.", "60"]:
svalue = valuesplit[0] + ':' + str(Decimal(valuesplit[1]) + Decimal(1.0)) + ':' + "00.0"
# Strip trailing dots.
svalue = svalue.rstrip('.')
elif quantity == 'maxdate' or quantity == 'discoverdate':
# Make sure month and day have leading zeroes
sparts = svalue.split('/')
if len(sparts) >= 2:
svalue = sparts[0] + '/' + sparts[1].zfill(2)
if len(sparts) == 3:
svalue = svalue + '/' + sparts[2].zfill(2)
if quantity in events[name]:
for i, ct in enumerate(events[name][quantity]):
# Only add dates if they have more information
if len(ct['value'].split('/')) > len(svalue.split('/')):
return
if is_number(svalue):
svalue = '%g' % Decimal(svalue)
if serror:
serror = '%g' % Decimal(serror)
if quantity in events[name]:
for i, ct in enumerate(events[name][quantity]):
if ct['value'] == svalue and sources:
if 'kind' in ct and skind and ct['kind'] != skind:
return
for source in sources.split(','):
if source not in events[name][quantity][i]['source'].split(','):
events[name][quantity][i]['source'] += ',' + source
if serror and 'error' not in events[name][quantity][i]:
events[name][quantity][i]['error'] = serror
return
if not sunit:
sunit = unit
quantaentry = OrderedDict()
quantaentry['value'] = svalue
if serror:
quantaentry['error'] = serror
if sources:
quantaentry['source'] = sources
if skind:
quantaentry['kind'] = skind
if sunit:
quantaentry['unit'] = sunit
if lowerlimit:
quantaentry['lowerlimit'] = lowerlimit
if upperlimit:
quantaentry['upperlimit'] = upperlimit
if extra:
quantaentry['extra'] = extra
if (forcereplacebetter or quantity in repbetterquantity) and quantity in events[name]:
newquantities = []
isworse = True
if quantity in ['discoverdate', 'maxdate']:
for ct in events[name][quantity]:
ctsplit = ct['value'].split('/')
svsplit = svalue.split('/')
if len(ctsplit) < len(svsplit):
isworse = False
continue
elif len(ctsplit) == len(svsplit) and len(svsplit) == 3:
if max(2,get_sig_digits(ctsplit[-1].lstrip('0'))) < max(2,get_sig_digits(svsplit[-1].lstrip('0'))):
isworse = False
continue
newquantities.append(ct)
else:
newsig = get_sig_digits(svalue)
for ct in events[name][quantity]:
if 'error' in ct:
if serror:
if float(serror) < float(ct['error']):
isworse = False
continue
newquantities.append(ct)
else:
if serror:
isworse = False
continue
oldsig = get_sig_digits(ct['value'])
if oldsig >= newsig:
newquantities.append(ct)
if newsig >= oldsig:
isworse = False
if not isworse:
newquantities.append(quantaentry)
events[name][quantity] = newquantities
else:
events[name].setdefault(quantity,[]).append(quantaentry)
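# Fetch a URL with a cached fallback: return the fresh text (optionally writing it to disk), the cached text if the request fails, or False when args.update is set and the remote copy matches the cached MD5.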
def load_cached_url(url, filepath, timeout = 120, write = True):
filemd5 = ''
filetxt = ''
if not args.refresh and os.path.isfile(filepath):
with codecs.open(filepath, 'r', encoding='utf8') as f:
filetxt = f.read()
if args.update:
filemd5 = md5(filetxt.encode('utf-8')).hexdigest()
try:
session = requests.Session()
response = session.get(url, timeout = timeout)
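# A 307 redirect here usually means the resource is temporarily unavailable; the bare raise below (no active exception) drops into the except branch, which falls back to the cached text.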
if any([x.status_code == 307 for x in response.history]):
raise
txt = response.text
newmd5 = md5(txt.encode('utf-8')).hexdigest()
#tprint(filemd5 + ":" + newmd5)
if args.update and newmd5 == filemd5:
tprint('Skipping file in "' + currenttask + '," local and remote copies identical [' + newmd5 + '].')
return False
except:
return filetxt
else:
if write:
with codecs.open(filepath, 'w', encoding='utf8') as f:
f.write(txt)
return txt
def make_date_string(year, month = '', day = ''):
if not year:
raise ValueError('At least the year must be specified when constructing date string')
datestring = str(year)
if month:
datestring = datestring + '/' + str(month).zfill(2)
if day:
datestring = datestring + '/' + str(day).zfill(2)
return datestring
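# Return (datetime, magnitude, band, source) of the brightest photometry point, preferring the bands listed in maxbands; the datetime is only recoverable when the time is reported in MJD.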
def get_max_light(name):
if 'photometry' not in events[name]:
return (None, None, None, None)
eventphoto = [(x['u_time'], x['time'], Decimal(x['magnitude']), x['band'] if 'band' in x else '', x['source']) for x in events[name]['photometry'] if
('magnitude' in x and 'time' in x and 'u_time' in x and 'upperlimit' not in x)]
if not eventphoto:
return (None, None, None, None)
mlmag = None
for mb in maxbands:
leventphoto = [x for x in eventphoto if x[3] in mb]
if leventphoto:
mlmag = min([x[2] for x in leventphoto])
eventphoto = leventphoto
break
if not mlmag:
mlmag = min([x[2] for x in eventphoto])
mlindex = [x[2] for x in eventphoto].index(mlmag)
mlband = eventphoto[mlindex][3]
mlsource = eventphoto[mlindex][4]
if eventphoto[mlindex][0] == 'MJD':
mlmjd = float(eventphoto[mlindex][1])
return (astrotime(mlmjd, format='mjd').datetime, mlmag, mlband, mlsource)
else:
return (None, mlmag, mlband, mlsource)
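# Return (datetime, source) of the earliest MJD-dated photometry point that is not an upper limit.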
def get_first_light(name):
if 'photometry' not in events[name]:
return (None, None)
eventphoto = [(Decimal(x['time']) if isinstance(x['time'], str) else Decimal(min(float(y) for y in x['time'])),
x['source']) for x in events[name]['photometry'] if 'upperlimit' not in x
and 'time' in x and 'u_time' in x and x['u_time'] == 'MJD']
if not eventphoto:
return (None, None)
flmjd = min([x[0] for x in eventphoto])
flindex = [x[0] for x in eventphoto].index(flmjd)
flmjd = float(flmjd)
flsource = eventphoto[flindex][1]
return (astrotime(flmjd, format='mjd').datetime, flsource)
def set_first_max_light(name):
if 'maxappmag' not in events[name]:
(mldt, mlmag, mlband, mlsource) = get_max_light(name)
if mldt:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxdate', make_date_string(mldt.year, mldt.month, mldt.day), uniq_cdl([source,mlsource]))
if mlmag:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxappmag', pretty_num(mlmag), uniq_cdl([source,mlsource]))
if mlband:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxband', mlband, uniq_cdl([source,mlsource]))
if 'discoverdate' not in events[name] or max([len(x['value'].split('/')) for x in events[name]['discoverdate']]) < 3:
(fldt, flsource) = get_first_light(name)
if fldt:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', make_date_string(fldt.year, fldt.month, fldt.day), uniq_cdl([source,flsource]))
if 'discoverdate' not in events[name] and 'spectra' in events[name]:
minspecmjd = float("+inf")
for spectrum in events[name]['spectra']:
if 'time' in spectrum and 'u_time' in spectrum:
if spectrum['u_time'] == 'MJD':
mjd = float(spectrum['time'])
elif spectrum['u_time'] == 'JD':
mjd = float(jd_to_mjd(Decimal(spectrum['time'])))
else:
continue
if mjd < minspecmjd:
minspecmjd = mjd
minspecsource = spectrum['source']
if minspecmjd < float("+inf"):
fldt = astrotime(minspecmjd, format='mjd').datetime
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', make_date_string(fldt.year, fldt.month, fldt.day), 'D,' + minspecsource)
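# Pick the 'best' redshift: more significant digits wins, but only among kinds at least as preferred (lower prefkinds index) as the current best.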
def get_best_redshift(name):
bestsig = -1
bestkind = 10
for z in events[name]['redshift']:
kind = prefkinds.index(z['kind'] if 'kind' in z else '')
sig = get_sig_digits(z['value'])
if sig > bestsig and kind <= bestkind:
bestz = z['value']
bestkind = kind
bestsig = sig
return (bestz, bestkind, bestsig)
def jd_to_mjd(jd):
return jd - Decimal(2400000.5)
def utf8(x):
return str(x, 'utf-8')
def convert_aq_output(row):
return OrderedDict([(x, str(row[x]) if is_number(row[x]) else row[x]) for x in row.colnames])
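# Rename events to a preferred alias: an SN designation if one exists, otherwise a survey name matching the discoverer, otherwise any alias in place of a bare PSN designation.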
def set_preferred_names():
if not len(events):
load_stubs()
for name in list(sorted(list(events.keys()))):
if name not in events:
continue
newname = ''
aliases = get_aliases(name)
if len(aliases) <= 1:
continue
if (name.startswith('SN') and ((is_number(name[2:6]) and not is_number(name[6:])) or
(is_number(name[2:5]) and not is_number(name[5:])))):
continue
for alias in aliases:
if (alias[:2] == 'SN' and ((is_number(alias[2:6]) and not is_number(alias[6:])) or
(is_number(alias[2:5]) and not is_number(alias[5:])))):
newname = alias
break
if not newname and 'discoverer' in events[name]:
discoverer = ','.join([x['value'].upper() for x in events[name]['discoverer']])
if 'ASAS' in discoverer:
for alias in aliases:
if 'ASASSN' in alias.upper():
newname = alias
break
if not newname and 'OGLE' in discoverer:
for alias in aliases:
if 'OGLE' in alias.upper():
newname = alias
break
if not newname and 'CRTS' in discoverer:
for alias in aliases:
if any(x in alias.upper() for x in ['CSS', 'MLS', 'SSS', 'SNHUNT']):
newname = alias
break
if not newname and 'PS1' in discoverer:
for alias in aliases:
if 'PS1' in alias.upper():
newname = alias
break
if not newname and 'PTF' in discoverer:
for alias in aliases:
if 'PTF' in alias.upper():
newname = alias
break
if not newname and 'GAIA' in discoverer:
for alias in aliases:
if 'GAIA' in alias.upper():
newname = alias
break
if not newname:
for alias in aliases:
# Always prefer another alias over PSN
if name.startswith('PSN'):
newname = alias
break
if newname and name != newname:
# Make sure new name doesn't already exist
if load_event_from_file(newname):
continue
if load_event_from_file(name, delete = True):
tprint('Changing event name (' + name + ') to preferred name (' + newname + ').')
events[newname] = events[name]
events[newname]['name'] = newname
del(events[name])
journal_events()
# Merge and remove duplicate events
def merge_duplicates():
if not len(events):
load_stubs()
currenttask = 'Merging duplicate events'
keys = list(sorted(list(events.keys())))
for n1, name1 in enumerate(tq(keys[:], currenttask)):
if name1 not in events:
continue
allnames1 = get_aliases(name1) + (['AT' + name1[2:]] if (name1.startswith('SN') and is_number(name1[2:6])) else [])
for name2 in keys[n1+1:]:
if name2 not in events or name1 == name2:
continue
allnames2 = get_aliases(name2) + (['AT' + name2[2:]] if (name2.startswith('SN') and is_number(name2[2:6])) else [])
if any(i in allnames1 for i in allnames2):
tprint('Found single event with multiple entries (' + name1 + ' and ' + name2 + '), merging.')
load1 = load_event_from_file(name1, delete = True)
load2 = load_event_from_file(name2, delete = True)
if load1 and load2:
priority1 = 0
priority2 = 0
for an in allnames1:
if len(an) >= 2 and an.startswith(('SN', 'AT')):
priority1 = priority1 + 1
for an in allnames2:
if len(an) >= 2 and an.startswith(('SN', 'AT')):
priority2 = priority2 + 1
if priority1 > priority2:
copy_to_event(name2, name1)
keys.append(name1)
del(events[name2])
else:
copy_to_event(name1, name2)
keys.append(name2)
del(events[name1])
else:
print('Duplicate already deleted')
journal_events()
def derive_and_sanitize():
biberrordict = {
"2012Sci..337..942D":"2012Sci...337..942D",
"2012MNRAS.420.1135":"2012MNRAS.420.1135S",
"2014MNRAS.438,368":"2014MNRAS.438..368T",
"2006ApJ...636...400Q":"2006ApJ...636..400Q",
"0609268":"2007AJ....133...58K",
"2004MNRAS.tmp..131P":"2004MNRAS.352..457P",
"2013MNRAS.tmp.1499F":"2013MNRAS.433.1312F",
"1991MNRAS.247P.410B":"1991A&A...247..410B",
"2011Sci.333..856S":"2011Sci...333..856S"
}
# Calculate some columns based on imported data, sanitize some fields
for name in events:
aliases = get_aliases(name, includename = False)
if name not in aliases:
if 'sources' in events[name]:
add_quantity(name, 'alias', name, '1')
else:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'alias', name, source)
if (name.startswith('SN') and is_number(name[2:6]) and 'discoverdate' in events[name] and
int(events[name]['discoverdate'][0]['value'].split('/')[0]) >= 2016 and not any(['AT' in x for x in aliases])):
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'alias', 'AT' + name[2:], source)
events[name]['alias'] = list(sorted(events[name]['alias'], key=lambda key: alias_priority(name, key)))
aliases = get_aliases(name)
set_first_max_light(name)
if 'claimedtype' in events[name]:
events[name]['claimedtype'] = list(sorted(events[name]['claimedtype'], key=lambda key: ct_priority(name, key)))
if 'discoverdate' not in events[name]:
prefixes = ['MLS', 'SSS', 'CSS']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:2]):
discoverdate = '/'.join(['20' + alias.replace(prefix, '')[:2],
alias.replace(prefix, '')[2:4], alias.replace(prefix, '')[4:6]])
if args.verbose:
tprint('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'discoverdate' not in events[name]:
prefixes = ['ASASSN-', 'PS1-', 'PS1', 'PS', 'iPTF', 'PTF', 'SCP-']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:2]):
discoverdate = '20' + alias.replace(prefix, '')[:2]
if args.verbose:
tprint('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'discoverdate' not in events[name]:
prefixes = ['SNF']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:4]):
discoverdate = '/'.join([alias.replace(prefix, '')[:4],
alias.replace(prefix, '')[4:6], alias.replace(prefix, '')[6:8]])
if args.verbose:
tprint('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'discoverdate' not in events[name]:
prefixes = ['AT', 'SN']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:4]):
discoverdate = alias.replace(prefix, '')[:4]
if args.verbose:
tprint('Added discoverdate from name: ' + discoverdate)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'discoverdate', discoverdate, source)
break
if 'discoverdate' in events[name]:
break
if 'ra' not in events[name] or 'dec' not in events[name]:
prefixes = ['PSN J', 'MASJ', 'CSS', 'SSS', 'MASTER OT J']
for alias in aliases:
for prefix in prefixes:
if alias.startswith(prefix) and is_number(alias.replace(prefix, '')[:6]):
noprefix = alias.split(':')[-1].replace(prefix, '').replace('.', '')
decsign = '+' if '+' in noprefix else '-'
noprefix = noprefix.replace('+','|').replace('-','|')
nops = noprefix.split('|')
if len(nops) < 2:
continue
rastr = nops[0]
decstr = nops[1]
ra = ':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) + ('.' + rastr[6:] if len(rastr) > 6 else '')
dec = decsign + ':'.join([decstr[:2], decstr[2:4], decstr[4:6]]) + ('.' + decstr[6:] if len(decstr) > 6 else '')
if args.verbose:
tprint('Added ra/dec from name: ' + ra + ' ' + dec)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'ra', ra, source)
add_quantity(name, 'dec', dec, source)
break
if 'ra' in events[name]:
break
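# Look up the Milky Way E(B-V) toward the event via the IRSA dust service when coordinates are known and the host is not the Milky Way itself.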
if ('ra' in events[name] and 'dec' in events[name] and
                ('host' not in events[name] or not any([x['value'] == 'Milky Way' for x in events[name]['host']]))):
if name not in extinctionsdict:
try:
result = IrsaDust.get_query_table(events[name]['ra'][0]['value'] + " " + events[name]['dec'][0]['value'], section = 'ebv')
except:
warnings.warn("Coordinate lookup for " + name + " failed in IRSA.")
else:
ebv = result['ext SandF mean'][0]
ebverr = result['ext SandF std'][0]
extinctionsdict[name] = [ebv, ebverr]
if name in extinctionsdict:
source = add_source(name, bibcode = '2011ApJ...737..103S')
add_quantity(name, 'ebv', str(extinctionsdict[name][0]), source, error = str(extinctionsdict[name][1]))
if 'claimedtype' in events[name]:
events[name]['claimedtype'][:] = [ct for ct in events[name]['claimedtype'] if (ct['value'] != '?' and ct['value'] != '-')]
if 'claimedtype' not in events[name] and name.startswith('AT'):
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'claimedtype', 'Candidate', source)
if 'redshift' not in events[name] and 'velocity' in events[name]:
# Find the "best" velocity to use for this
bestsig = 0
for hv in events[name]['velocity']:
sig = get_sig_digits(hv['value'])
if sig > bestsig:
besthv = hv['value']
bestsig = sig
if bestsig > 0 and is_number(besthv):
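# Convert the best heliocentric velocity to a redshift with the relativistic Doppler relation: z = sqrt((1 + v/c)/(1 - v/c)) - 1.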
voc = float(besthv)*1.e5/clight
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'redshift', pretty_num(sqrt((1. + voc)/(1. - voc)) - 1., sig = bestsig), source, kind = 'heliocentric')
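# With no direct redshift, estimate one from the median NED-D distance to the host galaxy by inverting the comoving distance relation.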
if 'redshift' not in events[name] and has_task('nedd') and 'host' in events[name]:
reference = "NED-D"
refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
for host in events[name]['host']:
if host['value'] in nedddict:
secondarysource = add_source(name, refname = reference, url = refurl, secondary = True)
meddist = statistics.median(nedddict[host['value']])
redshift = pretty_num(z_at_value(cosmo.comoving_distance, float(meddist) * un.Mpc), sig = get_sig_digits(str(meddist)))
add_quantity(name, 'redshift', redshift, secondarysource, kind = 'host')
if 'maxabsmag' not in events[name] and 'maxappmag' in events[name] and 'lumdist' in events[name]:
# Find the "best" distance to use for this
bestsig = 0
for ld in events[name]['lumdist']:
sig = get_sig_digits(ld['value'])
if sig > bestsig:
bestld = ld['value']
bestsig = sig
if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxabsmag', pretty_num(float(events[name]['maxappmag'][0]['value']) -
5.0*(log10(float(bestld)*1.0e6) - 1.0), sig = bestsig), source)
if 'redshift' in events[name]:
# Find the "best" redshift to use for this
(bestz, bestkind, bestsig) = get_best_redshift(name)
if bestsig > 0:
bestz = float(bestz)
if 'velocity' not in events[name]:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'velocity', pretty_num(clight/km*((bestz + 1.)**2. - 1.)/
((bestz + 1.)**2. + 1.), sig = bestsig), source, kind = prefkinds[bestkind])
if bestz > 0.:
if 'lumdist' not in events[name]:
dl = cosmo.luminosity_distance(bestz)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'lumdist', pretty_num(dl.value, sig = bestsig), source, kind = prefkinds[bestkind])
if 'maxabsmag' not in events[name] and 'maxappmag' in events[name]:
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'maxabsmag', pretty_num(float(events[name]['maxappmag'][0]['value']) -
5.0*(log10(dl.to('pc').value) - 1.0), sig = bestsig), source)
if 'comovingdist' not in events[name]:
dl = cosmo.comoving_distance(bestz)
source = add_source(name, bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
add_quantity(name, 'comovingdist', pretty_num(dl.value, sig = bestsig), source)
if 'photometry' in events[name]:
events[name]['photometry'].sort(key=lambda x: ((float(x['time']) if isinstance(x['time'], str) else
min([float(y) for y in x['time']])) if 'time' in x else 0.0,
x['band'] if 'band' in x else '', float(x['magnitude']) if 'magnitude' in x else ''))
if 'spectra' in events[name] and list(filter(None, ['time' in x for x in events[name]['spectra']])):
events[name]['spectra'].sort(key=lambda x: (float(x['time']) if 'time' in x else 0.0))
if 'sources' in events[name]:
for source in events[name]['sources']:
if 'bibcode' in source:
#First sanitize the bibcode
if len(source['bibcode']) != 19:
source['bibcode'] = urllib.parse.unquote(unescape(source['bibcode'])).replace('A.A.', 'A&A')
if source['bibcode'] in biberrordict:
source['bibcode'] = biberrordict[source['bibcode']]
if source['bibcode'] not in bibauthordict:
bibcode = source['bibcode']
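# Query the ADS custom-format service for a short author/year string to serve as the human-readable reference for this bibcode.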
adsquery = ('http://adsabs.harvard.edu/cgi-bin/nph-abs_connect?db_key=ALL&version=1&bibcode=' +
urllib.parse.quote(bibcode) + '&data_type=Custom&format=%253m%20%25(y)')
response = urllib.request.urlopen(adsquery)
html = response.read().decode('utf-8')
hsplit = html.split("\n")
if len(hsplit) > 5:
bibcodeauthor = hsplit[5]
else:
bibcodeauthor = ''
if not bibcodeauthor:
warnings.warn("Bibcode didn't return authors, not converting this bibcode.")
bibauthordict[bibcode] = unescape(bibcodeauthor).strip()
for source in events[name]['sources']:
if 'bibcode' in source and source['bibcode'] in bibauthordict and bibauthordict[source['bibcode']]:
source['reference'] = bibauthordict[source['bibcode']]
if 'name' not in source and source['bibcode']:
source['name'] = source['bibcode']
if 'redshift' in events[name]:
events[name]['redshift'] = list(sorted(events[name]['redshift'], key=lambda key: frame_priority(key)))
if 'velocity' in events[name]:
events[name]['velocity'] = list(sorted(events[name]['velocity'], key=lambda key: frame_priority(key)))
if 'claimedtype' in events[name]:
events[name]['claimedtype'] = list(sorted(events[name]['claimedtype'], key=lambda key: ct_priority(name, key)))
events[name] = OrderedDict(sorted(events[name].items(), key=lambda key: event_attr_priority(key[0])))
def delete_old_event_files():
# Delete all old event JSON files
files = repo_file_list()
for f in files:
os.remove(f)
def write_all_events(empty = False, gz = False, bury = False):
# Write it all out!
for name in events:
if 'stub' in events[name]:
if not empty:
continue
else:
del(events[name]['stub'])
if args.verbose and not args.travis:
tprint('Writing ' + name)
filename = get_event_filename(name)
outdir = '../'
if 'discoverdate' in events[name]:
for r, year in enumerate(repoyears):
if int(events[name]['discoverdate'][0]['value'].split('/')[0]) <= year:
outdir += repofolders[r]
break
else:
outdir += str(repofolders[0])
# Delete non-SN events here without IAU designations (those with only banned types)
if bury:
buryevent = False
nonsneprefixes = ('PNVJ', 'PNV J', 'OGLE-2013-NOVA')
if name.startswith(nonsneprefixes):
tprint('Burying ' + name + ', non-SNe prefix.')
continue
if 'claimedtype' in events[name] and not (name.startswith('SN') and is_number(name[2:6])):
for ct in events[name]['claimedtype']:
if ct['value'].upper() not in nonsnetypes and ct['value'].upper() != 'CANDIDATE':
buryevent = False
break
if ct['value'].upper() in nonsnetypes:
buryevent = True
if buryevent:
tprint('Burying ' + name + ' (' + ct['value'] + ').')
outdir = '../sne-boneyard'
jsonstring = json.dumps({name:events[name]}, indent='\t', separators=(',', ':'), ensure_ascii=False)
path = outdir + '/' + filename + '.json'
with codecs.open(path, 'w', encoding='utf8') as f:
f.write(jsonstring)
if gz:
if os.path.getsize(path) > 90000000:
if not args.travis:
tprint('Compressing ' + name)
with open(path, 'rb') as f_in, gzip.open(path + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(path)
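# Swap the tracked .json file for its gzipped version in git; the trailing 'cd ../scripts' suggests the script is expected to run from the scripts directory.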
os.system('cd ' + outdir + '; git rm ' + filename + '.json; git add -f ' + filename + '.json.gz; cd ' + '../scripts')
def null_field(obj, field):
return obj[field] if field in obj else ''
def copy_to_event(fromname, destname):
tprint('Copying ' + fromname + ' to event ' + destname)
newsourcealiases = {}
keys = list(sorted(events[fromname].keys(), key=lambda key: event_attr_priority(key)))
if 'sources' in events[fromname]:
for source in events[fromname]['sources']:
newsourcealiases[source['alias']] = (add_source(destname,
bibcode = source['bibcode'] if 'bibcode' in source else '',
refname = source['name'] if 'name' in source else '',
reference = source['reference'] if 'reference' in source else '',
url = source['url'] if 'url' in source else ''))
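# Copy every quantity, photometry point, and spectrum to the destination event, remapping source aliases to those created above.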
for key in keys:
if key not in ['name', 'sources']:
for item in events[fromname][key]:
isd = False
sources = []
if 'source' not in item:
ValueError("Item has no source!")
for sid in item['source'].split(','):
if sid == 'D':
sources.append('D')
elif sid in newsourcealiases:
sources.append(newsourcealiases[sid])
else:
ValueError("Couldn't find source alias!")
sources = uniq_cdl(sources)
if key == 'photometry':
add_photometry(destname, u_time = null_field(item, "u_time"), time = null_field(item, "time"),
e_time = null_field(item, "e_time"), telescope = null_field(item, "telescope"),
instrument = null_field(item, "instrument"), band = null_field(item, "band"),
magnitude = null_field(item, "magnitude"), e_magnitude = null_field(item, "e_magnitude"),
source = sources, upperlimit = null_field(item, "upperlimit"), system = null_field(item, "system"),
observatory = null_field(item, "observatory"), observer = null_field(item, "observer"),
host = null_field(item, "host"), survey = null_field(item, "survey"))
elif key == 'spectra':
add_spectrum(destname, null_field(item, "waveunit"), null_field(item, "fluxunit"), data = null_field(item, "data"),
u_time = null_field(item, "u_time"), time = null_field(item, "time"),
instrument = null_field(item, "instrument"), deredshifted = null_field(item, "deredshifted"),
dereddened = null_field(item, "dereddened"), errorunit = null_field(item, "errorunit"),
source = sources, snr = null_field(item, "snr"),
telescope = null_field(item, "telescope"), observer = null_field(item, "observer"),
reducer = null_field(item, "reducer"), filename = null_field(item, "filename"),
observatory = null_field(item, "observatory"))
elif key == 'errors':
add_quantity(destname, key, item['value'], sources,
kind = null_field(item, "kind"), extra = null_field(item, "extra"))
else:
add_quantity(destname, key, item['value'], sources, error = null_field(item, "error"),
unit = null_field(item, "unit"), kind = null_field(item, "kind"))
def load_event_from_file(name = '', location = '', clean = False, delete = True, append = False):
if not name and not location:
raise ValueError('Either event name or location must be specified to load event')
path = ''
namepath = ''
if location:
path = location
if name:
indir = '../'
for rep in repofolders:
filename = get_event_filename(name)
newpath = indir + rep + '/' + filename + '.json'
if os.path.isfile(newpath):
namepath = newpath
if not path and not namepath:
return False
else:
newevent = ''
newevent2 = ''
if path or namepath:
if name in events:
del events[name]
if path and namepath:
with open(path, 'r') as f, open(namepath, 'r') as nf:
newevent = json.loads(f.read(), object_pairs_hook=OrderedDict)
newevent2 = json.loads(nf.read(), object_pairs_hook=OrderedDict)
elif path:
with open(path, 'r') as f:
newevent = json.loads(f.read(), object_pairs_hook=OrderedDict)
elif namepath:
with open(namepath, 'r') as f:
newevent = json.loads(f.read(), object_pairs_hook=OrderedDict)
if newevent:
if clean:
newevent = clean_event(newevent)
name = next(reversed(newevent))
if append:
indir = '../'
for rep in repofolders:
filename = get_event_filename(name)
newpath = indir + rep + '/' + filename + '.json'
if os.path.isfile(newpath):
namepath = newpath
if namepath:
with open(namepath, 'r') as f:
newevent2 = json.loads(f.read(), object_pairs_hook=OrderedDict)
namename = next(reversed(newevent2))
if newevent2:
# Needs to be fixed
newevent = OrderedDict([['temp',newevent[name]]])
copy_to_event('temp', namename)
else:
events.update(newevent)
if args.verbose and not args.travis:
tprint('Loaded ' + name)
if 'writeevents' in tasks and delete and namepath:
os.remove(namepath)
return name
def clean_event(dirtyevent):
bibcodes = []
name = next(reversed(dirtyevent))
# This is very hacky and is only necessary because we don't have a proper 'Event' object yet.
events['temp'] = dirtyevent[name]
if 'name' not in events['temp']:
events['temp']['name'] = name
if 'sources' in events['temp']:
# Rebuild the sources
newsources = []
oldsources = events['temp']['sources']
del(events['temp']['sources'])
for s, source in enumerate(oldsources):
if 'bibcode' in source:
bibcodes.append(source['bibcode'])
add_source('temp', bibcode = source['bibcode'])
else:
add_source('temp', refname = source['name'], url = source['url'])
# Clean some legacy fields
if 'aliases' in events['temp'] and isinstance(events['temp']['aliases'], list):
source = add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
for alias in events['temp']['aliases']:
add_quantity('temp', 'alias', alias, source)
del(events['temp']['aliases'])
if ('distinctfrom' in events['temp'] and isinstance(events['temp']['distinctfrom'], list) and
isinstance(events['temp']['distinctfrom'][0], str)):
distinctfroms = [x for x in events['temp']['distinctfrom']]
del(events['temp']['distinctfrom'])
source = add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
for df in distinctfroms:
add_quantity('temp', 'distinctfrom', df, source)
if ('errors' in events['temp'] and isinstance(events['temp']['errors'], list) and
'sourcekind' in events['temp']['errors'][0]):
source = add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
for err in events['temp']['errors']:
add_quantity('temp', 'error', err['quantity'], source, kind = err['sourcekind'], extra = err['id'])
del(events['temp']['errors'])
if not bibcodes:
add_source('temp', bibcode = oscbibcode, refname = oscname, url = oscurl, secondary = True)
bibcodes = [oscbibcode]
for key in list(events['temp'].keys()):
if key in ['name', 'sources']:
pass
elif key == 'photometry':
for p, photo in enumerate(events['temp']['photometry']):
if photo['u_time'] == 'JD':
events['temp']['photometry'][p]['u_time'] = 'MJD'
events['temp']['photometry'][p]['time'] = str(jd_to_mjd(Decimal(photo['time'])))
if bibcodes and 'source' not in photo:
source = add_source('temp', bibcode = bibcodes[0])
events['temp']['photometry'][p]['source'] = source
else:
for qi, quantity in enumerate(events['temp'][key]):
if bibcodes and 'source' not in quantity:
source = add_source('temp', bibcode = bibcodes[0])
events['temp'][key][qi]['source'] = source
cleanevent = events['temp']
del (events['temp'])
return OrderedDict([[name,cleanevent]])
def has_task(task):
return task in tasks and (not args.update or tasks[task]['update'])
def archived_task(task):
if 'archived' in tasks[task] and args.archived:
return True
if ('archived' in tasks[task] and tasks[task]['archived'] and
task not in args.refreshlist.split(',') and not args.fullrefresh):
return True
return False
def do_task(checktask, task, quiet = False):
global currenttask
dotask = has_task(task) and checktask == task
if dotask and not quiet:
currenttask = (tasks[task]['nicename'] if tasks[task]['nicename'] else task).replace('%pre', 'Updating' if args.update else 'Loading')
return dotask
def journal_events(clear = True):
if 'writeevents' in tasks:
write_all_events()
if clear:
clear_events()
def clear_events():
global events
events = OrderedDict((k, OrderedDict([['name', events[k]['name']]] + ([['alias', events[k]['alias']]] if 'alias' in events[k] else []) + [['stub', True]])) for k in events)
def load_stubs():
global currenttask
currenttask = 'Loading event stubs'
files = repo_file_list()
#try:
# namepath = '../names.min.json'
# with open(namepath, 'r') as f:
# names = json.loads(f.read(), object_pairs_hook=OrderedDict)
# for fi in tq(files):
# name = os.path.basename(os.path.splitext(fi)[0])
# if name not in names:
# name = name.replace("_", "/")
# events[name] = OrderedDict(([['name', name], ['alias', [OrderedDict(([['value', x]])) for x in names[name]]], ['stub', True]]))
#except:
# events = OrderedDict()
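# Decompress any gzipped event files in place, then keep only a lightweight stub (name and aliases) for each event in memory.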
for fi in tq(files, currenttask):
fname = fi
if '.gz' in fi:
fname = fi.replace('.gz', '')
with gzip.open(fi, 'rb') as f_in, open(fname, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(fi)
name = os.path.basename(os.path.splitext(fname)[0]).replace('.json', '')
name = add_event(name, delete = False, loadifempty = False)
events[name] = OrderedDict(([['name', events[name]['name']]] + ([['alias', events[name]['alias']]] if 'alias' in events[name] else []) + [['stub', True]]))
path = '../atels.json'
if os.path.isfile(path):
with open(path, 'r') as f:
atelsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
atelsdict = OrderedDict()
path = '../cbets.json'
if os.path.isfile(path):
with open(path, 'r') as f:
cbetsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
cbetsdict = OrderedDict()
path = '../iaucs.json'
if os.path.isfile(path):
with open(path, 'r') as f:
iaucsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
iaucsdict = OrderedDict()
for task in tasks:
if do_task(task, 'deleteoldevents'):
currenttask = 'Deleting old events'
delete_old_event_files()
# Import data provided directly to OSC
if do_task(task, 'internal'):
for datafile in tq(sorted(glob("../sne-internal/*.json"), key=lambda s: s.lower()), currenttask):
if args.update:
if not load_event_from_file(location = datafile, clean = True, delete = False, append = True):
raise IOError('Failed to find specified file.')
else:
if not load_event_from_file(location = datafile, clean = True, delete = False):
raise IOError('Failed to find specified file.')
journal_events()
if do_task(task, 'radio'):
for datafile in tq(sorted(glob("../sne-external-radio/*.txt"), key=lambda s: s.lower()), currenttask):
name = add_event(os.path.basename(datafile).split('.')[0])
radiosourcedict = OrderedDict()
with open(datafile, 'r') as f:
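# Lines starting with '(' map a reference label to a bibcode; the three lines after the reference block are column headers and are skipped; the rest are photometry rows (flux densities in µJy).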
for li, line in enumerate([x.strip() for x in f.read().splitlines()]):
if line.startswith('(') and li <= len(radiosourcedict):
radiosourcedict[line.split()[0]] = add_source(name, bibcode = line.split()[-1])
elif li in [x + len(radiosourcedict) for x in range(3)]:
continue
else:
cols = list(filter(None, line.split()))
source = radiosourcedict[cols[6]]
add_photometry(name, time = cols[0], frequency = cols[2], u_frequency = 'GHz', fluxdensity = cols[3],
e_fluxdensity = cols[4], u_fluxdensity = 'µJy', instrument = cols[5], source = source)
add_quantity(name, 'alias', name, source)
journal_events()
if do_task(task, 'xray'):
for datafile in tq(sorted(glob("../sne-external-xray/*.txt"), key=lambda s: s.lower()), currenttask):
name = add_event(os.path.basename(datafile).split('.')[0])
with open(datafile, 'r') as f:
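# The first line holds the reference bibcode, the next three are column headers, and the remaining lines are X-ray photometry rows.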
for li, line in enumerate(f.read().splitlines()):
if li == 0:
source = add_source(name, bibcode = line.split()[-1])
elif li in [1,2,3]:
continue
else:
cols = list(filter(None, line.split()))
add_photometry(name, time = cols[:2],
energy = cols[2:4], u_energy = 'keV', counts = cols[4], flux = cols[6],
unabsorbedflux = cols[8], u_flux = 'ergs/s/cm^2',
photonindex = cols[15], instrument = cols[17], nhmw = cols[11],
upperlimit = (float(cols[5]) < 0), source = source)
add_quantity(name, 'alias', name, source)
journal_events()
#if do_task(task, 'simbad'):
# Simbad.list_votable_fields()
# customSimbad = Simbad()
# customSimbad.add_votable_fields('otype', 'id(opt)')
# result = customSimbad.query_object('SN 20[0-9][0-9]*', wildcard=True)
# for r, row in enumerate(result):
# if row['OTYPE'].decode() != "SN":
# continue
# name = row["MAIN_ID"].decode()
# aliases = Simbad.query_objectids(name)
# print(aliases)
# if name[:3] == 'SN ':
# name = 'SN' + name[3:]
# if name[:2] == 'SN' and is_number(name[2:]):
# name = name + 'A'
# name = add_event(name)
# journal_events()
# Import primary data sources from Vizier
if do_task(task, 'vizier'):
Vizier.ROW_LIMIT = -1
# 2012ApJS..200...12H
result = Vizier.get_catalogs("J/ApJS/200/12/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['SN']
if is_number(name[:4]):
name = 'SN' + name
name = add_event(name)
source = add_source(name, bibcode = "2012ApJS..200...12H")
add_quantity(name, 'alias', name, source)
if '[' not in row['Gal']:
add_quantity(name, 'host', row['Gal'].replace('_', ' '), source)
add_quantity(name, 'redshift', str(row['z']), source, kind = 'heliocentric')
add_quantity(name, 'redshift', str(row['zCMB']), source, kind = 'cmb')
add_quantity(name, 'ebv', str(row['E_B-V_']), source, error = str(row['e_E_B-V_']) if row['e_E_B-V_'] else '')
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
# 2012ApJ...746...85S
result = Vizier.get_catalogs("J/ApJ/746/85/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['Name'].replace('SCP', 'SCP-')
name = add_event(name)
source = add_source(name, bibcode = "2012ApJ...746...85S")
add_quantity(name, 'alias', name, source)
if row['f_Name']:
add_quantity(name, 'claimedtype', 'Ia', source)
if row['z']:
add_quantity(name, 'redshift', str(row['z']), source, kind = 'spectroscopic')
else:
add_quantity(name, 'redshift', str(row['zCl']), source, kind = 'cluster')
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
result = Vizier.get_catalogs("J/ApJ/746/85/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['Name'].replace('SCP', 'SCP-')
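# Convert catalog fluxes to magnitudes: m = ZP - 2.5*log10(flux), sigma_m = 2.5*log10(1 + err/flux); rows with non-positive flux or errors above 5 mag are skipped.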
flux = Decimal(float(row['Flux']))
if flux <= 0.0:
continue
err = Decimal(float(row['e_Flux']))
zp = Decimal(float(row['Zero']))
sig = get_sig_digits(str(row['Flux']))+1
magnitude = pretty_num(zp-Decimal(2.5)*(flux.log10()), sig = sig)
e_magnitude = pretty_num(Decimal(2.5)*(Decimal(1.0) + err/flux).log10(), sig = sig)
if float(e_magnitude) > 5.0:
continue
name = add_event(name)
source = add_source(name, bibcode = "2012ApJ...746...85S")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = str(row['MJD']), band = row['Filter'], instrument = row['Inst'],
magnitude = magnitude, e_magnitude = e_magnitude, source = source)
# 2004ApJ...602..571B
result = Vizier.get_catalogs("J/ApJ/602/571/table8")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = 'SN'+row['SN']
flux = Decimal(float(row['Flux']))
if flux <= 0.0:
continue
err = Decimal(float(row['e_Flux']))
sig = get_sig_digits(str(row['Flux']))+1
magnitude = pretty_num(Decimal(25.0)-Decimal(2.5)*(flux.log10()), sig = sig)
e_magnitude = pretty_num(Decimal(2.5)*(Decimal(1.0) + err/flux).log10(), sig = sig)
if float(e_magnitude) > 5.0:
continue
name = add_event(name)
source = add_source(name, bibcode = "2004ApJ...602..571B")
add_quantity(name, 'alias', name, source)
band = row['Filt']
system = ''
telescope = ''
if band in ['R', 'I']:
system = 'Cousins'
if band == 'Z':
telescope = 'Subaru'
add_photometry(name, time = str(row['MJD']), band = band, system = system, telescope = telescope,
magnitude = magnitude, e_magnitude = e_magnitude, source = source)
# 2014MNRAS.444.3258M
result = Vizier.get_catalogs("J/MNRAS/444/3258/SNe")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
oldname = ''
for row in tq(table, currenttask):
name = row['SN']
if name == oldname:
continue
oldname = name
name = add_event(name)
source = add_source(name, bibcode = '2014MNRAS.444.3258M')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['z']), source, kind = 'heliocentric', error = str(row['e_z']))
add_quantity(name, 'ra', str(row['_RA']), source, unit = 'floatdegrees')
add_quantity(name, 'dec', str(row['_DE']), source, unit = 'floatdegrees')
journal_events()
# 2014MNRAS.438.1391P
result = Vizier.get_catalogs("J/MNRAS/438/1391/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014MNRAS.438.1391P')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['zh']), source, kind = 'heliocentric')
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
journal_events()
# 2012ApJ...749...18B
result = Vizier.get_catalogs("J/ApJ/749/18/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['Name'].replace(' ','')
name = add_event(name)
source = add_source(name, bibcode = '2012ApJ...749...18B')
add_quantity(name, 'alias', name, source)
mjd = str(astrotime(2450000.+row['JD'], format='jd').mjd)
band = row['Filt']
magnitude = str(row['mag'])
e_magnitude = str(row['e_mag'])
e_magnitude = '' if e_magnitude == '--' else e_magnitude
upperlimit = (row['l_mag'] == '>')
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, instrument = 'UVOT',
source = source, upperlimit = upperlimit, telescope = 'Swift', system = 'Swift')
journal_events()
# 2010A&A...523A...7G
result = Vizier.get_catalogs("J/A+A/523/A7/table9")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = 'SNLS-' + row['SNLS']
name = add_event(name)
source = add_source(name, bibcode = '2010A&A...523A...7G')
add_quantity(name, 'alias', name, source)
astrot = astrotime(2450000.+row['Date1'], format='jd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
add_quantity(name, 'redshift', str(row['z']), source, kind = 'heliocentric')
add_quantity(name, 'claimedtype', (row['Type'].replace('*', '?').replace('SN','')
.replace('(pec)',' P').replace('Ia? P?', 'Ia P?')), source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
journal_events()
# 2004A&A...415..863G
result = Vizier.get_catalogs("J/A+A/415/863/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2004A&A...415..863G')
add_quantity(name, 'alias', name, source)
datesplit = row['Date'].split('-')
add_quantity(name, 'discoverdate', make_date_string(datesplit[0], datesplit[1].lstrip('0'), datesplit[2].lstrip('0')), source)
add_quantity(name, 'host', 'Abell ' + str(row['Abell']), source)
add_quantity(name, 'claimedtype', row['Type'], source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
if row['zSN']:
add_quantity(name, 'redshift', str(row['zSN']), source, kind = 'spectroscopic')
else:
add_quantity(name, 'redshift', str(row['zCl']), source, kind = 'cluster')
journal_events()
# 2008AJ....136.2306H
result = Vizier.get_catalogs("J/AJ/136/2306/sources")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = 'SDSS-II ' + str(row['SNID'])
name = add_event(name)
source = add_source(name, bibcode = '2008AJ....136.2306H')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', row['SpType'].replace('SN.', '').strip(':'), source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
# 2010ApJ...708..661D
result = Vizier.get_catalogs("J/ApJ/708/661/sn")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
if not name:
name = 'SDSS-II ' + str(row['SDSS-II'])
else:
name = 'SN' + name
name = add_event(name)
source = add_source(name, bibcode = '2010ApJ...708..661D')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', 'SDSS-II ' + str(row['SDSS-II']), source)
add_quantity(name, 'claimedtype', 'II P', source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
result = Vizier.get_catalogs("J/ApJ/708/661/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
if row['f_SN'] == 'a':
name = 'SDSS-II ' + str(row['SN'])
else:
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2010ApJ...708..661D')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['z']), source, error = str(row['e_z']))
journal_events()
# 2014ApJ...795...44R
result = Vizier.get_catalogs("J/ApJ/795/44/ps1_snIa")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014ApJ...795...44R')
add_quantity(name, 'alias', name, source)
astrot = astrotime(row['tdisc'], format='mjd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
add_quantity(name, 'redshift', str(row['z']), source, error = str(row['e_z']), kind = 'heliocentric')
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
add_quantity(name, 'claimedtype', 'Ia', source)
result = Vizier.get_catalogs("J/ApJ/795/44/table6")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
name = row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014ApJ...795...44R')
add_quantity(name, 'alias', name, source)
if row['mag'] != '--':
add_photometry(name, time = str(row['MJD']), band = row['Filt'], magnitude = str(row['mag']),
e_magnitude = str(row['e_mag']), source = source, system = 'AB', telescope = 'PS1', instrument = 'PS1')
journal_events()
# 1990A&AS...82..145C
result = Vizier.get_catalogs("II/189/mag")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
with open('../sne-external/II_189_refs.csv') as f:
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
ii189bibdict = {}
ii189refdict = {}
for r, row in enumerate(tsvin):
if row[0] != '0':
ii189bibdict[r+1] = row[1]
else:
ii189refdict[r+1] = row[2]
for row in tq(table, currenttask):
if row['band'][0] == '(':
continue
name = 'SN' + row['SN']
name = add_event(name)
source = ''
secsource = add_source(name, bibcode = '1990A&AS...82..145C', secondary = True)
mjd = str(jd_to_mjd(Decimal(row['JD'])))
mag = str(row['m'])
band = row['band'].strip("'")
if row['r_m'] in ii189bibdict:
source = add_source(name, bibcode = ii189bibdict[row['r_m']])
else:
source = add_source(name, refname = ii189refdict[row['r_m']])
add_quantity(name, 'alias', name, source)
add_photometry(name, time = mjd, band = band, magnitude = mag, source = uniq_cdl([source,secsource]))
journal_events()
# 2014yCat.7272....0G
result = Vizier.get_catalogs("VII/272/snrs")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = ''
if row["Names"]:
names = row["Names"].split(',')
for nam in names:
if nam.strip()[:2] == 'SN':
name = nam.strip()
if is_number(name[2:]):
name = name + 'A'
if not name:
for nam in names:
if nam.strip('()') == nam:
name = nam.strip()
break
if not name:
name = row["SNR"].strip()
name = add_event(name)
source = (add_source(name, bibcode = '2014BASI...42...47G') + ',' +
add_source(name, refname = 'Galactic SNRs', url = 'https://www.mrao.cam.ac.uk/surveys/snrs/snrs.data.html'))
add_quantity(name, 'alias', name, source)
add_quantity(name, "alias", row["SNR"].strip(), source)
add_quantity(name, "alias", 'MWSNR '+row["SNR"].strip('G '), source)
if row["Names"]:
names = row["Names"].split(',')
for nam in names:
add_quantity(name, "alias", nam.replace('Vela (XYZ)', 'Vela').strip('()').strip(), source)
if nam.strip()[:2] == 'SN':
add_quantity(name, 'discoverdate', nam.strip()[2:], source)
add_quantity(name, 'host', 'Milky Way', source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
journal_events()
# 2014MNRAS.442..844F
result = Vizier.get_catalogs("J/MNRAS/442/844/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = '2014MNRAS.442..844F')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', str(row['zhost']), source, kind = 'host')
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
journal_events()
result = Vizier.get_catalogs("J/MNRAS/442/844/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'SN' + str(row['SN'])
name = add_event(name)
source = add_source(name, bibcode = "2014MNRAS.442..844F")
add_quantity(name, 'alias', name, source)
for band in ['B', 'V', 'R', 'I']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = row['MJD'], band = band, magnitude = row[bandtag],
e_magnitude = row['e_' + bandtag], source = source, telescope = 'KAIT', instrument = 'KAIT')
journal_events()
# 2012MNRAS.425.1789S
result = Vizier.get_catalogs("J/MNRAS/425/1789/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = ''.join(row['SimbadName'].split(' '))
name = add_event(name)
source = add_source(name, bibcode = '2012MNRAS.425.1789S')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', 'SN' + row['SN'], source)
add_quantity(name, 'host', row['Gal'], source)
if is_number(row['cz']):
add_quantity(name, 'redshift', str(round_sig(float(row['cz'])*km/clight, sig = get_sig_digits(str(row['cz'])))), source, kind = 'heliocentric')
add_quantity(name, 'ebv', str(row['E_B-V_']), source)
journal_events()
# 2015ApJS..219...13W
result = Vizier.get_catalogs("J/ApJS/219/13/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = u'LSQ' + str(row['LSQ'])
name = add_event(name)
source = add_source(name, bibcode = "2015ApJS..219...13W")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', row['RAJ2000'], source)
add_quantity(name, 'dec', row['DEJ2000'], source)
add_quantity(name, 'redshift', row['z'], source, error = row['e_z'], kind = 'heliocentric')
add_quantity(name, 'ebv', row['E_B-V_'], source)
add_quantity(name, 'claimedtype', 'Ia', source)
result = Vizier.get_catalogs("J/ApJS/219/13/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'LSQ' + row['LSQ']
name = add_event(name)
source = add_source(name, bibcode = "2015ApJS..219...13W")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), instrument = 'QUEST', telescope = 'ESO Schmidt',
observatory = 'La Silla', band = row['Filt'],
magnitude = row['mag'], e_magnitude = row['e_mag'], system = "Swope", source = source)
journal_events()
# 2012Natur.491..228C
result = Vizier.get_catalogs("J/other/Nat/491.228/tablef1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'SN2213-1745'
name = add_event(name)
source = add_source(name, bibcode = "2012Natur.491..228C")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'SLSN-R', source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['g', 'r', 'i']:
bandtag = band + '_mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = row["MJD" + band + "_"], band = band + "'", magnitude = row[bandtag],
e_magnitude = row["e_" + bandtag], source = source)
result = Vizier.get_catalogs("J/other/Nat/491.228/tablef2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'SN1000+0216'
name = add_event(name)
source = add_source(name, bibcode = "2012Natur.491..228C")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'SLSN-II?', source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['g', 'r', 'i']:
bandtag = band + '_mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = row["MJD" + band + "_"], band = band + "'", magnitude = row[bandtag],
e_magnitude = row["e_" + bandtag], source = source)
journal_events()
# 2011Natur.474..484Q
result = Vizier.get_catalogs("J/other/Nat/474.484/tables1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2011Natur.474..484Q")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = row['MJD'], band = row['Filt'], telescope = row['Tel'],
magnitude = row['mag'], e_magnitude = row['e_mag'], source = source)
journal_events()
# 2011ApJ...736..159G
result = Vizier.get_catalogs("J/ApJ/736/159/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'PTF10vdl'
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...736..159G")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), band = row['Filt'], telescope = row['Tel'], magnitude = row['mag'],
e_magnitude = row['e_mag'] if is_number(row['e_mag']) else '', upperlimit = (not is_number(row['e_mag'])), source = source)
journal_events()
# 2012ApJ...760L..33B
result = Vizier.get_catalogs("J/ApJ/760/L33/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'PTF12gzk'
name = add_event(name)
source = add_source(name, bibcode = "2012ApJ...760L..33B")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
# Fixing a typo in VizieR table
if str(row['JD']) == '2455151.456':
row['JD'] = '2456151.456'
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), band = row['Filt'], telescope = row['Inst'], magnitude = row['mag'],
e_magnitude = row['e_mag'], source = source)
journal_events()
# 2013ApJ...769...39S
result = Vizier.get_catalogs("J/ApJ/769/39/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'PS1-12sk'
name = add_event(name)
source = add_source(name, bibcode = "2013ApJ...769...39S")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
instrument = ''
telescope = ''
if row['Inst'] == 'RATCam':
instrument = row['Inst']
else:
telescope = row['Inst']
add_photometry(name, time = row['MJD'], band = row['Filt'], telescope = telescope, instrument = instrument, magnitude = row['mag'],
e_magnitude = row['e_mag'] if not row['l_mag'] else '', upperlimit = (row['l_mag'] == '>'), source = source)
journal_events()
# 2009MNRAS.394.2266P
# Note: Instrument info available via links in VizieR, can't auto-parse just yet.
name = 'SN2005cs'
name = add_event(name)
source = add_source(name, bibcode = "2009MNRAS.394.2266P")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/MNRAS/394/2266/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['U', 'B', 'V', 'R', 'I']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = band, magnitude = row[bandtag],
e_magnitude = (row["e_" + bandtag] if row['l_' + bandtag] != '>' else ''),
source = source, upperlimit = (row['l_' + bandtag] == '>'))
if "zmag" in row and is_number(row["zmag"]) and not isnan(float(row["zmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = "z", magnitude = row["zmag"],
e_magnitude = row["e_zmag"], source = source)
result = Vizier.get_catalogs("J/MNRAS/394/2266/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['B', 'V', 'R']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = band, magnitude = row[bandtag],
e_magnitude = (row["e_" + bandtag] if row['l_' + bandtag] != '>' else ''),
source = source, upperlimit = (row['l_' + bandtag] == '>'))
result = Vizier.get_catalogs("J/MNRAS/394/2266/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
for band in ['J', 'H', 'K']:
bandtag = band + 'mag'
if bandtag in row and is_number(row[bandtag]) and not isnan(float(row[bandtag])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), band = band, magnitude = row[bandtag],
e_magnitude = row["e_" + bandtag], source = source)
journal_events()
# 2013AJ....145...99A
result = Vizier.get_catalogs("J/AJ/145/99/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
name = 'SN2003ie'
name = add_event(name)
source = add_source(name, bibcode = "2013AJ....145...99A")
add_quantity(name, 'alias', name, source)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = row["MJD"], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"] if not row["l_Bmag"] else '',
upperlimit = (row['l_Bmag'] == '>'), source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = row["MJD"], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"] if is_number(row["e_Vmag"]) else '',
upperlimit = (not is_number(row["e_Vmag"])), source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = row["MJD"], band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"] if not row["l_Rmag"] else '',
upperlimit = (row['l_Rmag'] == '>'), source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = row["MJD"], band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
journal_events()
# 2011ApJ...729..143C
name = 'SN2008am'
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...729..143C")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/ApJ/729/143/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = row['MJD'], band = 'ROTSE', telescope = 'ROTSE', magnitude = row['mag'],
e_magnitude = row['e_mag'] if not row['l_mag'] else '', upperlimit = (row['l_mag'] == '<'), source = source)
result = Vizier.get_catalogs("J/ApJ/729/143/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Jmag" in row and is_number(row["Jmag"]) and not isnan(float(row["Jmag"])):
add_photometry(name, time = row["MJD"], telescope = "PAIRITEL", band = "J", magnitude = row["Jmag"],
e_magnitude = row["e_Jmag"], source = source)
if "Hmag" in row and is_number(row["Hmag"]) and not isnan(float(row["Hmag"])):
add_photometry(name, time = row["MJD"], telescope = "PAIRITEL", band = "H", magnitude = row["Hmag"],
e_magnitude = row["e_Hmag"], source = source)
if "Ksmag" in row and is_number(row["Ksmag"]) and not isnan(float(row["Ksmag"])):
add_photometry(name, time = row["MJD"], telescope = "PAIRITEL", band = "Ks", magnitude = row["Ksmag"],
e_magnitude = row["e_Ksmag"], source = source)
result = Vizier.get_catalogs("J/ApJ/729/143/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = row['MJD'], band = row['Filt'], telescope = 'P60', magnitude = row['mag'],
e_magnitude = row['e_mag'], source = source)
result = Vizier.get_catalogs("J/ApJ/729/143/table5")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
add_photometry(name, time = row['MJD'], band = row['Filt'], instrument = 'UVOT', telescope = 'Swift', magnitude = row['mag'],
e_magnitude = row['e_mag'], source = source)
journal_events()
# 2011ApJ...728...14P
name = 'SN2009bb'
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...728...14P")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/ApJ/728/14/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
result = Vizier.get_catalogs("J/ApJ/728/14/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "u_mag" in row and is_number(row["u_mag"]) and not isnan(float(row["u_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "u'", magnitude = row["u_mag"],
e_magnitude = row["e_u_mag"], source = source)
if "g_mag" in row and is_number(row["g_mag"]) and not isnan(float(row["g_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "g'", magnitude = row["g_mag"],
e_magnitude = row["e_g_mag"], source = source)
if "r_mag" in row and is_number(row["r_mag"]) and not isnan(float(row["r_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "r'", magnitude = row["r_mag"],
e_magnitude = row["e_r_mag"], source = source)
if "i_mag" in row and is_number(row["i_mag"]) and not isnan(float(row["i_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "i'", magnitude = row["i_mag"],
e_magnitude = row["e_i_mag"], source = source)
if "z_mag" in row and is_number(row["z_mag"]) and not isnan(float(row["z_mag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), telescope = row['Tel'], band = "z'", magnitude = row["z_mag"],
e_magnitude = row["e_z_mag"], source = source)
result = Vizier.get_catalogs("J/ApJ/728/14/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
if "Ymag" in row and is_number(row["Ymag"]) and not isnan(float(row["Ymag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), instrument = row['Inst'], band = "Y", magnitude = row["Ymag"],
e_magnitude = row["e_Ymag"], source = source)
if "Jmag" in row and is_number(row["Jmag"]) and not isnan(float(row["Jmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), instrument = row['Inst'], band = "J", magnitude = row["Jmag"],
e_magnitude = row["e_Jmag"], source = source)
if "Hmag" in row and is_number(row["Hmag"]) and not isnan(float(row["Hmag"])):
add_photometry(name, time = str(jd_to_mjd(Decimal(row["JD"]))), instrument = row['Inst'], band = "H", magnitude = row["Hmag"],
e_magnitude = row["e_Hmag"], source = source)
journal_events()
# 2011PAZh...37..837T
name = 'SN2009nr'
name = add_event(name)
source = add_source(name, bibcode = "2011PAZh...37..837T")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/PAZh/37/837/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = str(jd_to_mjd(Decimal(row["JD"]) + 2455000))
if "Umag" in row and is_number(row["Umag"]) and not isnan(float(row["Umag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "U", magnitude = row["Umag"],
e_magnitude = row["e_Umag"], source = source)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
journal_events()
# 2013MNRAS.433.1871B
name = 'SN2012aw'
name = add_event(name)
source = add_source(name, bibcode = "2013MNRAS.433.1871B")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/MNRAS/433/1871/table3a")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = str(jd_to_mjd(Decimal(row["JD"]) + 2456000))
if "Umag" in row and is_number(row["Umag"]) and not isnan(float(row["Umag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "U", magnitude = row["Umag"],
e_magnitude = row["e_Umag"], source = source)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rcmag" in row and is_number(row["Rcmag"]) and not isnan(float(row["Rcmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "Rc", magnitude = row["Rcmag"],
e_magnitude = row["e_Rcmag"], source = source)
if "Icmag" in row and is_number(row["Icmag"]) and not isnan(float(row["Icmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "Ic", magnitude = row["Icmag"],
e_magnitude = row["e_Icmag"], source = source)
result = Vizier.get_catalogs("J/MNRAS/433/1871/table3b")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = str(jd_to_mjd(Decimal(row["JD"]) + 2456000))
if "gmag" in row and is_number(row["gmag"]) and not isnan(float(row["gmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "g", magnitude = row["gmag"],
e_magnitude = row["e_gmag"], source = source)
if "rmag" in row and is_number(row["rmag"]) and not isnan(float(row["rmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "r", magnitude = row["rmag"],
e_magnitude = row["e_rmag"], source = source)
if "imag" in row and is_number(row["imag"]) and not isnan(float(row["imag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "i", magnitude = row["imag"],
e_magnitude = row["e_imag"], source = source)
if "zmag" in row and is_number(row["zmag"]) and not isnan(float(row["zmag"])):
add_photometry(name, time = mjd, telescope = row['Tel'], band = "z", magnitude = row["zmag"],
e_magnitude = row["e_zmag"], source = source)
journal_events()
# 2014AJ....148....1Z
name = 'SN2012fr'
name = add_event(name)
source = add_source(name, bibcode = "2014AJ....148....1Z")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/AJ/148/1/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = mjd, telescope = "LJT", instrument = "YFOSC", band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
result = Vizier.get_catalogs("J/AJ/148/1/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "Umag" in row and is_number(row["Umag"]) and not isnan(float(row["Umag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "U", magnitude = row["Umag"],
e_magnitude = row["e_Umag"], source = source)
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "UVW1" in row and is_number(row["UVW1"]) and not isnan(float(row["UVW1"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "W1", magnitude = row["UVW1"],
e_magnitude = row["e_UVW1"], source = source)
if "UVW2" in row and is_number(row["UVW2"]) and not isnan(float(row["UVW2"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "W2", magnitude = row["UVW2"],
e_magnitude = row["e_UVW2"], source = source)
if "UVM2" in row and is_number(row["UVM2"]) and not isnan(float(row["UVM2"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = "M2", magnitude = row["UVM2"],
e_magnitude = row["e_UVM2"], source = source)
result = Vizier.get_catalogs("J/AJ/148/1/table5")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "Bmag" in row and is_number(row["Bmag"]) and not isnan(float(row["Bmag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "B", magnitude = row["Bmag"],
e_magnitude = row["e_Bmag"], source = source)
if "Vmag" in row and is_number(row["Vmag"]) and not isnan(float(row["Vmag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "V", magnitude = row["Vmag"],
e_magnitude = row["e_Vmag"], source = source)
if "Rmag" in row and is_number(row["Rmag"]) and not isnan(float(row["Rmag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "R", magnitude = row["Rmag"],
e_magnitude = row["e_Rmag"], source = source)
if "Imag" in row and is_number(row["Imag"]) and not isnan(float(row["Imag"])):
add_photometry(name, time = mjd, telescope = "LJT", band = "I", magnitude = row["Imag"],
e_magnitude = row["e_Imag"], source = source)
journal_events()
# 2015ApJ...805...74B
name = 'SN2014J'
name = add_event(name)
source = add_source(name, bibcode = "2014AJ....148....1Z")
add_quantity(name, 'alias', name, source)
result = Vizier.get_catalogs("J/ApJ/805/74/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
mjd = row['MJD']
if "mag" in row and is_number(row["mag"]) and not isnan(float(row["mag"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = row["Filt"], magnitude = row["mag"],
e_magnitude = row["e_mag"], source = source)
elif "maglim" in row and is_number(row["maglim"]) and not isnan(float(row["maglim"])):
add_photometry(name, time = mjd, telescope = "Swift", instrument = "UVOT", band = row["Filt"], magnitude = row["maglim"],
upperlimit = True, source = source)
journal_events()
# 2011ApJ...741...97D
result = Vizier.get_catalogs("J/ApJ/741/97/table2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['SN'])
name = add_event(name)
source = add_source(name, bibcode = "2011ApJ...741...97D")
add_quantity(name, 'alias', name, source)
add_photometry(name, time = str(jd_to_mjd(Decimal(row['JD']))), band = row['Filt'], magnitude = row['mag'],
e_magnitude = row['e_mag'] if is_number(row['e_mag']) else '', upperlimit = (not is_number(row['e_mag'])), source = source)
journal_events()
# 2015MNRAS.448.1206M
    # Note: Photometry from two SNe can also be added from this source.
result = Vizier.get_catalogs("J/MNRAS/448/1206/table3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['zsp'], source, kind = 'spectroscopic')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', 'Ia', source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['zph'], source, error = row['e_zph'], kind = 'photometric')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', 'Ia?', source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/table5")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['zsp'], source, kind = 'spectroscopic')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', row['Type'], source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/table6")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', row['Type'], source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/tablea2")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', row['Typesoft']+'?', source)
add_quantity(name, 'claimedtype', row['Typepsnid']+'?', source)
result = Vizier.get_catalogs("J/MNRAS/448/1206/tablea3")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = str(row['Name'])
name = add_event(name)
source = add_source(name, bibcode = "2015MNRAS.448.1206M")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + name[4:6], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'maxappmag', row['rP1mag'], source, error = row['e_rP1mag'])
add_quantity(name, 'maxband', 'r', source)
add_quantity(name, 'claimedtype', 'Candidate', source)
journal_events()
# 2012AJ....143..126B
result = Vizier.get_catalogs("J/AJ/143/126/table4")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
if not row['Wcl'] or row['Wcl'] == 'N':
continue
row = convert_aq_output(row)
name = str(row['SN']).replace(' ', '')
name = add_event(name)
source = add_source(name, bibcode = "2012AJ....143..126B")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'Ia-' + row['Wcl'], source)
journal_events()
# 2015ApJS..220....9F
for viztab in ['1', '2']:
result = Vizier.get_catalogs("J/ApJS/220/9/table" + viztab)
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = add_event(row['SN'])
source = add_source(name, bibcode = "2015ApJS..220....9F")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', row['Type'], source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
if '?' not in row['Host']:
add_quantity(name, 'host', row['Host'].replace('_', ' '), source)
kind = ''
if 'Host' in row['n_z']:
kind = 'host'
elif 'Spectrum' in row['n_z']:
kind = 'spectroscopic'
add_quantity(name, 'redshift', row['z'], source, error = row['e_z'], kind = kind)
result = Vizier.get_catalogs("J/ApJS/220/9/table8")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = add_event(row['SN'])
source = add_source(name, bibcode = "2015ApJS..220....9F")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', row['Type'], source)
add_photometry(name, time = row['MJD'], band = row['Band'], magnitude = row['mag'],
e_magnitude = row["e_mag"], telescope = row["Tel"], source = source)
journal_events()
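    # 2008ApJ...673..999P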
result = Vizier.get_catalogs("J/ApJ/673/999/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = add_event('SN'+row['SN'])
source = add_source(name, bibcode = "2008ApJ...673..999P")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', row['RAJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row['DEJ2000'], source, unit = 'floatdegrees')
add_quantity(name, 'redshift', row['z'], source, kind = 'host')
add_quantity(name, 'hostra', row['RAGdeg'], source, unit = 'floatdegrees')
add_quantity(name, 'hostdec', row['DEGdeg'], source, unit = 'floatdegrees')
add_quantity(name, 'claimedtype', row['Type'].strip(':'), source)
journal_events()
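    # Directly donated data sets (Nicholl, Maggi, Galbany, Brown)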
if do_task(task, 'donations'):
# Nicholl 04-01-16 donation
with open("../sne-external/Nicholl-04-01-16/bibcodes.json", 'r') as f:
bcs = json.loads(f.read())
for datafile in sorted(glob("../sne-external/Nicholl-04-01-16/*.txt"), key=lambda s: s.lower()):
name = os.path.basename(datafile).split('_')[0]
name = add_event(name)
bibcode = ''
for bc in bcs:
if name in bcs[bc]:
bibcode = bc
if not bibcode:
                raise ValueError('Bibcode not found!')
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
with open(datafile,'r') as f:
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
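                # The '#MJD' header row lists the band names; data columns alternate magnitude/error per band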
for r, rrow in enumerate(tsvin):
row = list(filter(None, rrow))
if not row:
continue
if row[0][0] == '#' and row[0] != '#MJD':
continue
if row[0] == '#MJD':
bands = [x for x in row[1:] if x and 'err' not in x]
continue
mjd = row[0]
if not is_number(mjd):
continue
for v, val in enumerate(row[1::2]):
upperlimit = ''
if '>' in val:
upperlimit = True
mag = val.strip('>')
if not is_number(mag) or isnan(float(mag)) or float(mag) > 90.0:
continue
err = ''
if is_number(row[2*v+2]) and not isnan(float(row[2*v+2])):
err = row[2*v+2]
add_photometry(name, time = mjd, band = bands[v], magnitude = mag,
e_magnitude = err, upperlimit = upperlimit, source = source)
journal_events()
# Maggi 04-11-16 donation (MC SNRs)
with open('../sne-external/Maggi-04-11-16/LMCSNRs_OpenSNe.csv') as f:
tsvin = csv.reader(f, delimiter=',')
for row in tsvin:
name = 'MCSNR ' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2016A&A...585A.162M')
add_quantity(name, 'alias', name, source)
if row[1] != 'noname':
add_quantity(name, "alias", row[1], source)
add_quantity(name, 'ra', row[2], source)
add_quantity(name, 'dec', row[3], source)
add_quantity(name, 'host', 'LMC', source)
if row[4] == '1':
add_quantity(name, 'claimedtype', 'Ia', source)
elif row[4] == '2':
add_quantity(name, 'claimedtype', 'CC', source)
with open('../sne-external/Maggi-04-11-16/SMCSNRs_OpenSNe.csv') as f:
tsvin = csv.reader(f, delimiter=',')
for row in tsvin:
name = 'MCSNR ' + row[0]
name = add_event(name)
source = add_source(name, refname = 'Pierre Maggi')
add_quantity(name, 'alias', name, source)
add_quantity(name, "alias", row[1], source)
add_quantity(name, "alias", row[2], source)
add_quantity(name, 'ra', row[3], source)
add_quantity(name, 'dec', row[4], source)
add_quantity(name, 'host', 'SMC', source)
journal_events()
# Galbany 04-18-16 donation
folders = next(os.walk('../sne-external/galbany-04-18-16/'))[1]
bibcode = '2016AJ....151...33G'
for folder in folders:
infofiles = glob("../sne-external/galbany-04-18-16/" + folder + "/*.info")
photfiles = glob("../sne-external/galbany-04-18-16/" + folder + "/*.out*")
zhel = ''
zcmb = ''
zerr = ''
for path in infofiles:
with open(path, 'r') as f:
lines = f.read().splitlines()
for line in lines:
splitline = line.split(':')
field = splitline[0].strip().lower()
value = splitline[1].strip()
if field == 'name':
name = value[:6].upper() + (value[6].upper() if len(value) == 7 else value[6:])
name = add_event(name)
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
elif field == 'type':
claimedtype = value.replace('SN', '')
add_quantity(name, 'claimedtype', claimedtype, source)
elif field == 'zhel':
zhel = value
elif field == 'redshift_error':
zerr = value
elif field == 'zcmb':
zcmb = value
elif field == 'ra':
add_quantity(name, 'ra', value, source, unit = 'floatdegrees')
elif field == 'dec':
add_quantity(name, 'dec', value, source, unit = 'floatdegrees')
elif field == 'host':
add_quantity(name, 'host', value.replace('- ', '-').replace('G ', 'G'), source)
elif field == 'e(b-v)_mw':
add_quantity(name, 'ebv', value, source)
add_quantity(name, 'redshift', zhel, source, error = zerr, kind = 'heliocentric')
add_quantity(name, 'redshift', zcmb, source, error = zerr, kind = 'cmb')
for path in photfiles:
with open(path, 'r') as f:
band = ''
lines = f.read().splitlines()
for li, line in enumerate(lines):
if li in [0, 2, 3]:
continue
if li == 1:
band = line.split(':')[-1].strip()
else:
cols = list(filter(None, line.split()))
if not cols:
continue
add_photometry(name, time = cols[0], magnitude = cols[1], e_magnitude = cols[2],
band = band, system = cols[3], telescope = cols[4], source = source)
journal_events()
# Brown 05-14-16
files = glob("../sne-external/brown-05-14-16/*.dat")
for fi in tq(files, currenttask):
name = os.path.basename(fi).split('_')[0]
name = add_event(name)
source = add_source(name, refname = 'Swift Supernovae', bibcode = '2014Ap&SS.354...89B',
url = 'http://people.physics.tamu.edu/pbrown/SwiftSN/swift_sn.html')
add_quantity(name, 'alias', name, source)
with open(fi, 'r') as f:
lines = f.read().splitlines()
for line in lines:
if not line or line[0] == '#':
continue
cols = list(filter(None, line.split()))
band = cols[0]
mjd = cols[1]
# Skip lower limit entries for now
if cols[2] == 'NULL' and cols[6] == 'NULL':
continue
isupp = cols[2] == 'NULL' and cols[6] != 'NULL'
mag = cols[2] if not isupp else cols[4]
e_mag = cols[3] if not isupp else ''
upp = '' if not isupp else True
add_photometry(name, time = mjd, magnitude = mag, e_magnitude = e_mag,
upperlimit = upp, band = band, source = source,
telescope = 'Swift', instrument = 'UVOT', system = 'Vega')
journal_events()
# Nicholl 05-03-16
files = glob("../sne-external/nicholl-05-03-16/*.txt")
name = add_event('SN2015bn')
source = add_source(name, bibcode = '2016arXiv160304748N')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', 'PS15ae', source)
for fi in tq(files, currenttask):
telescope = os.path.basename(fi).split('_')[1]
with open(fi, 'r') as f:
lines = f.read().splitlines()
for li, line in enumerate(lines):
if not line or (line[0] == '#' and li != 0):
continue
cols = list(filter(None, line.split()))
if not cols:
continue
if li == 0:
bands = cols[1:]
continue
mjd = cols[0]
for ci, col in enumerate(cols[1::2]):
if not is_number(col):
continue
emag = cols[2*ci+2]
upp = ''
if not is_number(emag):
emag = ''
upp = True
add_photometry(name, time = mjd, magnitude = col, e_magnitude = emag,
upperlimit = upp, band = bands[ci], source = source,
telescope = telescope, instrument = 'UVOT' if telescope == 'Swift' else '')
journal_events()
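    # PESSTO DR1 photometry (2015A&A...579A..40S)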
if do_task(task, 'pessto-dr1'):
with open("../sne-external/PESSTO_MPHOT.csv", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for ri, row in enumerate(tsvin):
if ri == 0:
bands = [x.split('_')[0] for x in row[3::2]]
systems = [x.split('_')[1].capitalize().replace('Ab', 'AB') for x in row[3::2]]
continue
name = row[1]
name = add_event(name)
source = add_source(name, bibcode = "2015A&A...579A..40S")
add_quantity(name, 'alias', name, source)
for hi, ci in enumerate(range(3,len(row)-1,2)):
if not row[ci]:
continue
add_photometry(name, time = row[2], magnitude = row[ci], e_magnitude = row[ci+1],
band = bands[hi], system = systems[hi], telescope = 'Swift' if systems[hi] == 'Swift' else '',
source = source)
journal_events()
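    # Supernova Cosmology Project cluster survey candidates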
if do_task(task, 'scp'):
with open("../sne-external/SCP09.csv", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask)):
if ri == 0:
continue
name = row[0].replace('SCP', 'SCP-')
name = add_event(name)
source = add_source(name, refname = 'Supernova Cosmology Project', url = 'http://supernova.lbl.gov/2009ClusterSurvey/')
add_quantity(name, 'alias', name, source)
if row[1]:
add_quantity(name, 'alias', row[1], source)
if row[2]:
add_quantity(name, 'redshift', row[2], source, kind = 'spectroscopic' if row[3] == 'sn' else 'host')
if row[4]:
add_quantity(name, 'redshift', row[2], source, kind = 'cluster')
if row[6]:
claimedtype = row[6].replace('SN ', '')
kind = ('spectroscopic/light curve' if 'a' in row[7] and 'c' in row[7] else
'spectroscopic' if 'a' in row[7] else 'light curve' if 'c' in row[7] else '')
if claimedtype != '?':
add_quantity(name, 'claimedtype', claimedtype, source, kind = kind)
journal_events()
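    # Assorted photometry and metadata from ASCII/TSV tables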
if do_task(task, 'ascii'):
# 2006ApJ...645..841N
with open("../sne-external/2006ApJ...645..841N-table3.csv", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask)):
name = 'SNLS-' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2006ApJ...645..841N')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'redshift', row[1], source, kind = 'spectroscopic')
astrot = astrotime(float(row[4]) + 2450000., format = 'jd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
journal_events()
# Anderson 2014
for datafile in tq(sorted(glob("../sne-external/SNII_anderson2014/*.dat"), key=lambda s: s.lower()), currenttask):
basename = os.path.basename(datafile)
if not is_number(basename[:2]):
continue
if basename == '0210_V.dat':
name = 'SN0210'
else:
name = ('SN20' if int(basename[:2]) < 50 else 'SN19') + basename.split('_')[0]
name = add_event(name)
source = add_source(name, bibcode = '2014ApJ...786...67A')
add_quantity(name, 'alias', name, source)
if name in ['SN1999ca','SN2003dq','SN2008aw']:
system = 'Swope'
else:
system = 'Landolt'
with open(datafile,'r') as f:
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
for row in tsvin:
if not row[0]:
continue
add_photometry(name, time = str(jd_to_mjd(Decimal(row[0]))), band = 'V', magnitude = row[1], e_magnitude = row[2], system = system, source = source)
journal_events()
# stromlo
stromlobands = ['B','V','R','I','VM','RM']
with open('../sne-external/J_A+A_415_863-1/photometry.csv', 'r') as f:
tsvin = csv.reader(f, delimiter=',')
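            # Each band spans three columns: magnitude, upper error, lower error; a lower error without an upper error marks an upper limit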
for row in tq(tsvin, currenttask):
name = row[0]
name = add_event(name)
source = add_source(name, bibcode = "2004A&A...415..863G")
add_quantity(name, 'alias', name, source)
mjd = str(jd_to_mjd(Decimal(row[1])))
for ri, ci in enumerate(range(2,len(row),3)):
if not row[ci]:
continue
band = stromlobands[ri]
upperlimit = True if (not row[ci+1] and row[ci+2]) else False
e_upper_magnitude = str(abs(Decimal(row[ci+1]))) if row[ci+1] else ''
e_lower_magnitude = str(abs(Decimal(row[ci+2]))) if row[ci+2] else ''
add_photometry(name, time = mjd, band = band, magnitude = row[ci],
e_upper_magnitude = e_upper_magnitude, e_lower_magnitude = e_lower_magnitude,
upperlimit = upperlimit, telescope = 'MSSSO 1.3m' if band in ['VM', 'RM'] else 'CTIO',
instrument = 'MaCHO' if band in ['VM', 'RM'] else '', source = source)
journal_events()
# 2015MNRAS.449..451W
with open("../sne-external/2015MNRAS.449..451W.dat", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if r == 0:
continue
namesplit = row[0].split('/')
name = namesplit[-1]
if name.startswith('SN'):
name = name.replace(' ', '')
name = add_event(name)
source = add_source(name, bibcode = '2015MNRAS.449..451W')
add_quantity(name, 'alias', name, source)
if len(namesplit) > 1:
add_quantity(name, 'alias', namesplit[0], source)
add_quantity(name, 'claimedtype', row[1], source)
add_photometry(name, time = row[2], band = row[4], magnitude = row[3], source = source)
journal_events()
# 2016MNRAS.459.1039T
with open("../sne-external/2016MNRAS.459.1039T.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
name = add_event('LSQ13zm')
source = add_source(name, bibcode = '2016MNRAS.459.1039T')
add_quantity(name, 'alias', name, source)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
bands = [x.replace('(err)', '') for x in row[3:-1]]
continue
mjd = row[1]
mags = [re.sub(r'\([^)]*\)', '', x) for x in row[3:-1]]
upps = [True if '>' in x else '' for x in mags]
mags = [x.replace('>', '') for x in mags]
errs = [x[x.find("(")+1:x.find(")")] if "(" in x else '' for x in row[3:-1]]
for mi, mag in enumerate(mags):
if not is_number(mag):
continue
add_photometry(name, time = mjd, band = bands[mi], magnitude = mag, e_magnitude = errs[mi],
instrument = row[-1], upperlimit = upps[mi], source = source)
journal_events()
# 2015ApJ...804...28G
with open("../sne-external/2015ApJ...804...28G.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
name = add_event('PS1-13arp')
source = add_source(name, bibcode = '2015ApJ...804...28G')
add_quantity(name, 'alias', name, source)
for r, row in enumerate(tq(data, currenttask)):
if r == 0:
continue
mjd = row[1]
mag = row[3]
upp = True if '<' in mag else ''
mag = mag.replace('<', '')
err = row[4] if is_number(row[4]) else ''
ins = row[5]
add_photometry(name, time = mjd, band = row[0], magnitude = mag, e_magnitude = err,
instrument = ins, upperlimit = upp, source = source)
journal_events()
# 2016ApJ...819...35A
with open("../sne-external/2016ApJ...819...35A.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
continue
name = add_event(row[0])
source = add_source(name, bibcode = '2016ApJ...819...35A')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', row[1], source)
add_quantity(name, 'dec', row[2], source)
add_quantity(name, 'redshift', row[3], source)
add_quantity(name, 'discoverdate',
datetime.strptime(row[4], '%Y %b %d').isoformat().split('T')[0].replace('-', '/'), source)
journal_events()
# 2014ApJ...784..105W
with open("../sne-external/2014ApJ...784..105W.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
continue
name = add_event(row[0])
source = add_source(name, bibcode = '2014ApJ...784..105W')
add_quantity(name, 'alias', name, source)
mjd = row[1]
band = row[2]
mag = row[3]
err = row[4]
add_photometry(name, time = mjd, band = row[2], magnitude = mag, e_magnitude = err,
instrument = 'WHIRC', telescope = 'WIYN 3.5 m', observatory = 'NOAO',
system = 'WHIRC', source = source)
journal_events()
# 2012MNRAS.425.1007B
with open("../sne-external/2012MNRAS.425.1007B.tsv", 'r') as f:
data = csv.reader(f, delimiter='\t', quotechar='"', skipinitialspace = True)
for r, row in enumerate(tq(data, currenttask)):
if row[0][0] == '#':
bands = row[2:]
continue
name = add_event(row[0])
source = add_source(name, bibcode = '2012MNRAS.425.1007B')
add_quantity(name, 'alias', name, source)
mjd = row[1]
mags = [x.split('±')[0].strip() for x in row[2:]]
errs = [x.split('±')[1].strip() if '±' in x else '' for x in row[2:]]
if row[0] == 'PTF09dlc':
ins = 'HAWK-I'
tel = 'VLT 8.1m'
obs = 'ESO'
else:
ins = 'NIRI'
tel = 'Gemini North 8.2m'
obs = 'Gemini'
for mi, mag in enumerate(mags):
if not is_number(mag):
continue
add_photometry(name, time = mjd, band = bands[mi], magnitude = mag, e_magnitude = errs[mi],
instrument = ins, telescope = tel, observatory = obs,
system = 'Natural', source = source)
journal_events()
# CCCP
if do_task(task, 'cccp'):
cccpbands = ['B', 'V', 'R', 'I']
for datafile in sorted(glob("../sne-external/CCCP/apj407397*.txt"), key=lambda s: s.lower()):
with open(datafile,'r') as f:
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
for r, row in enumerate(tsvin):
if r == 0:
continue
elif r == 1:
name = 'SN' + row[0].split('SN ')[-1]
name = add_event(name)
source = add_source(name, bibcode = '2012ApJ...744...10K')
add_quantity(name, 'alias', name, source)
elif r >= 5:
mjd = str(Decimal(row[0]) + 53000)
for b, band in enumerate(cccpbands):
                            if row[2*b + 1]:
                                upplim = not row[2*b + 2]
                                add_photometry(name, time = mjd, band = band, magnitude = row[2*b + 1].strip('>'),
                                    e_magnitude = row[2*b + 2], upperlimit = upplim, source = source)
if archived_task('cccp'):
with open('../sne-external/CCCP/sc_cccp.html', 'r') as f:
html = f.read()
else:
session = requests.Session()
response = session.get("https://webhome.weizmann.ac.il/home/iair/sc_cccp.html")
html = response.text
with open('../sne-external/CCCP/sc_cccp.html', 'w') as f:
f.write(html)
soup = BeautifulSoup(html, "html5lib")
links = soup.body.findAll("a")
for link in tq(links, currenttask):
if 'sc_sn' in link['href']:
name = add_event(link.text.replace(' ', ''))
source = add_source(name, refname = 'CCCP', url = 'https://webhome.weizmann.ac.il/home/iair/sc_cccp.html')
add_quantity(name, 'alias', name, source)
if archived_task('cccp'):
with open('../sne-external/CCCP/' + link['href'].split('/')[-1], 'r') as f:
html2 = f.read()
else:
response2 = session.get("https://webhome.weizmann.ac.il/home/iair/" + link['href'])
html2 = response2.text
with open('../sne-external/CCCP/' + link['href'].split('/')[-1], 'w') as f:
f.write(html2)
soup2 = BeautifulSoup(html2, "html5lib")
links2 = soup2.body.findAll("a")
for link2 in links2:
if ".txt" in link2['href'] and '_' in link2['href']:
band = link2['href'].split('_')[1].split('.')[0].upper()
if archived_task('cccp'):
fname = '../sne-external/CCCP/' + link2['href'].split('/')[-1]
if not os.path.isfile(fname):
continue
with open(fname, 'r') as f:
html3 = f.read()
else:
response3 = session.get("https://webhome.weizmann.ac.il/home/iair/cccp/" + link2['href'])
if response3.status_code == 404:
continue
html3 = response3.text
with open('../sne-external/CCCP/' + link2['href'].split('/')[-1], 'w') as f:
f.write(html3)
table = [[str(Decimal(y.strip())).rstrip('0') for y in x.split(",")] for x in list(filter(None, html3.split("\n")))]
for row in table:
add_photometry(name, time = str(Decimal(row[0]) + 53000), band = band, magnitude = row[1], e_magnitude = row[2], source = source)
journal_events()
# Suspect catalog
if do_task(task, 'suspect'):
with open('../sne-external/suspectreferences.csv','r') as f:
tsvin = csv.reader(f, delimiter=',', skipinitialspace=True)
suspectrefdict = {}
for row in tsvin:
suspectrefdict[row[0]] = row[1]
for datafile in tq(sorted(glob("../sne-external/SUSPECT/*.html"), key=lambda s: s.lower()), currenttask):
basename = os.path.basename(datafile)
basesplit = basename.split('-')
name = basesplit[1]
name = add_event(name)
if name.startswith('SN') and is_number(name[2:]):
name = name + 'A'
band = basesplit[3].split('.')[0]
ei = int(basesplit[2])
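            # ei == 1 marks the first per-band file for an event; event-level metadata is only parsed from that file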
bandlink = 'file://' + os.path.abspath(datafile)
bandresp = urllib.request.urlopen(bandlink)
bandsoup = BeautifulSoup(bandresp, "html5lib")
bandtable = bandsoup.find('table')
names = bandsoup.body.findAll(text=re.compile("Name"))
reference = ''
for link in bandsoup.body.findAll('a'):
if 'adsabs' in link['href']:
reference = str(link).replace('"', "'")
bibcode = unescape(suspectrefdict[reference])
source = add_source(name, bibcode = bibcode)
secondaryreference = "SUSPECT"
secondaryrefurl = "https://www.nhn.ou.edu/~suspect/"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if ei == 1:
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, secondarysource)
add_quantity(name, 'host', names[1].split(':')[1].strip(), secondarysource)
redshifts = bandsoup.body.findAll(text=re.compile("Redshift"))
if redshifts:
add_quantity(name, 'redshift', redshifts[0].split(':')[1].strip(), secondarysource, kind = 'heliocentric')
hvels = bandsoup.body.findAll(text=re.compile("Heliocentric Velocity"))
#if hvels:
# add_quantity(name, 'velocity', hvels[0].split(':')[1].strip().split(' ')[0],
# secondarysource, kind = 'heliocentric')
types = bandsoup.body.findAll(text=re.compile("Type"))
add_quantity(name, 'claimedtype', types[0].split(':')[1].strip().split(' ')[0], secondarysource)
for r, row in enumerate(bandtable.findAll('tr')):
if r == 0:
continue
col = row.findAll('td')
mjd = str(jd_to_mjd(Decimal(col[0].contents[0])))
mag = col[3].contents[0]
if mag.isspace():
mag = ''
else:
mag = str(mag)
e_magnitude = col[4].contents[0]
if e_magnitude.isspace():
e_magnitude = ''
else:
e_magnitude = str(e_magnitude)
add_photometry(name, time = mjd, band = band, magnitude = mag, e_magnitude = e_magnitude, source = secondarysource + ',' + source)
journal_events()
# CfA data
if do_task(task, 'cfa'):
for fname in tq(sorted(glob("../sne-external/cfa-input/*.dat"), key=lambda s: s.lower()), currenttask):
f = open(fname,'r')
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
csv_data = []
for r, row in enumerate(tsvin):
new = []
for item in row:
new.extend(item.split("\t"))
csv_data.append(new)
for r, row in enumerate(csv_data):
for c, col in enumerate(row):
csv_data[r][c] = col.strip()
csv_data[r] = [_f for _f in csv_data[r] if _f]
eventname = os.path.basename(os.path.splitext(fname)[0])
eventparts = eventname.split('_')
name = snname(eventparts[0])
name = add_event(name)
secondaryname = 'CfA Supernova Archive'
secondaryurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
secondarysource = add_source(name, refname = secondaryname, url = secondaryurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, secondarysource)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, secondarysource)
eventbands = list(eventparts[1])
tu = 'MJD'
jdoffset = Decimal(0.)
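            # Header rows set the time unit (JD/HJD) and offset; photometry rows alternate magnitude/error for the bands encoded in the filename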
for rc, row in enumerate(csv_data):
if len(row) > 0 and row[0][0] == "#":
if len(row[0]) > 2 and row[0][:3] == "#JD":
tu = 'JD'
rowparts = row[0].split('-')
jdoffset = Decimal(rowparts[1])
elif len(row[0]) > 6 and row[0][:7] == "#Julian":
tu = 'JD'
jdoffset = Decimal(0.)
elif len(row) > 1 and row[1].lower() == "photometry":
for ci, col in enumerate(row[2:]):
if col[0] == "(":
refstr = ' '.join(row[2+ci:])
refstr = refstr.replace('(','').replace(')','')
bibcode = unescape(refstr)
source = add_source(name, bibcode = bibcode)
elif len(row) > 1 and row[1] == "HJD":
tu = "HJD"
continue
elif len(row) > 0:
mjd = row[0]
for v, val in enumerate(row):
if v == 0:
if tu == 'JD':
mjd = str(jd_to_mjd(Decimal(val) + jdoffset))
tuout = 'MJD'
elif tu == 'HJD':
mjd = str(jd_to_mjd(Decimal(val)))
tuout = 'MJD'
else:
mjd = val
tuout = tu
elif v % 2 != 0:
if float(row[v]) < 90.0:
add_photometry(name, u_time = tuout, time = mjd, band = eventbands[(v-1)//2], magnitude = row[v], e_magnitude = row[v+1], source = secondarysource + ',' + source)
f.close()
# Hicken 2012
f = open("../sne-external/hicken-2012-standard.dat", 'r')
tsvin = csv.reader(f, delimiter='|', skipinitialspace=True)
for r, row in enumerate(tq(tsvin, currenttask)):
if r <= 47:
continue
if row[0][:2] != 'sn':
name = 'SN' + row[0].strip()
else:
name = row[0].strip()
name = add_event(name)
source = add_source(name, bibcode = '2012ApJS..200...12H')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'claimedtype', 'Ia', source)
add_photometry(name, u_time = 'MJD', time = row[2].strip(), band = row[1].strip(),
magnitude = row[6].strip(), e_magnitude = row[7].strip(), source = source)
        f.close()
        # Bianco 2014
        f = open("../sne-external/bianco-2014-standard.dat", 'r')
        tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
for row in tq(tsvin, currenttask):
name = 'SN' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2014ApJS..213...19B')
add_quantity(name, 'alias', name, source)
add_photometry(name, u_time = 'MJD', time = row[2], band = row[1], magnitude = row[3],
e_magnitude = row[4], telescope = row[5], system = "Standard", source = source)
f.close()
journal_events()
# New UCB import
if do_task(task, 'ucb'):
secondaryreference = "UCB Filippenko Group's Supernova Database (SNDB)"
secondaryrefurl = "http://heracles.astro.berkeley.edu/sndb/info"
secondaryrefbib = "2012MNRAS.425.1789S"
jsontxt = load_cached_url("http://heracles.astro.berkeley.edu/sndb/download?id=allpubphot",
'../sne-external-spectra/UCB/allpub.json')
if not jsontxt:
continue
photom = json.loads(jsontxt)
photom = sorted(photom, key = lambda k: k['ObjName'])
for phot in tq(photom, currenttask = currenttask):
name = phot["ObjName"]
name = add_event(name)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondaryrefbib, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
sources = [secondarysource]
if phot["Reference"]:
sources += [add_source(name, bibcode = phot["Reference"])]
sources = uniq_cdl(sources)
if phot["Type"] and phot["Type"].strip() != "NoMatch":
for ct in phot["Type"].strip().split(','):
add_quantity(name, 'claimedtype', ct.replace('-norm', '').strip(), sources)
if phot["DiscDate"]:
add_quantity(name, 'discoverdate', phot["DiscDate"].replace('-', '/'), sources)
if phot["HostName"]:
add_quantity(name, 'host', urllib.parse.unquote(phot["HostName"]).replace('*', ''), sources)
filename = phot["Filename"] if phot["Filename"] else ''
if not filename:
                raise ValueError('Filename not found for SNDB phot!')
            if not phot["PhotID"]:
                raise ValueError('ID not found for SNDB phot!')
filepath = '../sne-external/SNDB/' + filename
if archived_task('ucb') and os.path.isfile(filepath):
with open(filepath, 'r') as f:
phottxt = f.read()
else:
session = requests.Session()
response = session.get("http://heracles.astro.berkeley.edu/sndb/download?id=dp:" + str(phot["PhotID"]))
phottxt = response.text
with open(filepath, 'w') as f:
f.write(phottxt)
tsvin = csv.reader(phottxt.splitlines(), delimiter=' ', skipinitialspace=True)
for r, row in enumerate(tsvin):
if len(row) > 0 and row[0] == "#":
continue
mjd = row[0]
magnitude = row[1]
if magnitude and float(magnitude) > 99.0:
continue
e_magnitude = row[2]
band = row[4]
telescope = row[5]
add_photometry(name, time = mjd, telescope = telescope, band = band, magnitude = magnitude,
e_magnitude = e_magnitude, source = sources)
journal_events()
# Import SDSS
if do_task(task, 'sdss'):
with open('../sne-external/SDSS/2010ApJ...708..661D.txt', 'r') as f:
bibcodes2010 = f.read().split("\n")
sdssbands = ['u', 'g', 'r', 'i', 'z']
for fname in tq(sorted(glob("../sne-external/SDSS/*.sum"), key=lambda s: s.lower()), currenttask):
f = open(fname,'r')
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
basename = os.path.basename(fname)
if basename in bibcodes2010:
bibcode = '2010ApJ...708..661D'
else:
bibcode = '2008AJ....136.2306H'
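            # Rows 0-1 hold the name, coordinates, and redshift; photometry begins at row 19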
for r, row in enumerate(tsvin):
if r == 0:
if row[5] == "RA:":
name = "SDSS-II " + row[3]
else:
name = "SN" + row[5]
name = add_event(name)
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', "SDSS-II " + row[3], source)
if row[5] != "RA:":
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, source)
add_quantity(name, 'ra', row[-4], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row[-2], source, unit = 'floatdegrees')
if r == 1:
error = row[4] if float(row[4]) >= 0.0 else ''
add_quantity(name, 'redshift', row[2], source, error = error, kind = 'heliocentric')
if r >= 19:
# Skip bad measurements
if int(row[0]) > 1024:
continue
mjd = row[1]
band = sdssbands[int(row[2])]
magnitude = row[3]
e_magnitude = row[4]
telescope = "SDSS"
add_photometry(name, time = mjd, telescope = telescope, band = band, magnitude = magnitude,
e_magnitude = e_magnitude, source = source, system = "SDSS")
f.close()
journal_events()
    # Import GAIA
if do_task(task, 'gaia'):
fname = '../sne-external/GAIA/alerts.csv'
csvtxt = load_cached_url('http://gsaweb.ast.cam.ac.uk/alerts/alerts.csv', fname)
if not csvtxt:
continue
tsvin = csv.reader(csvtxt.splitlines(), delimiter=',', skipinitialspace=True)
reference = "Gaia Photometric Science Alerts"
refurl = "http://gsaweb.ast.cam.ac.uk/alerts/alertsindex"
for ri, row in enumerate(tq(tsvin, currenttask)):
if ri == 0 or not row:
continue
name = add_event(row[0])
source = add_source(name, refname = reference, url = refurl)
add_quantity(name, 'alias', name, source)
year = '20' + re.findall(r'\d+', row[0])[0]
add_quantity(name, 'discoverdate', year, source)
add_quantity(name, 'ra', row[2], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row[3], source, unit = 'floatdegrees')
if row[7] and row[7] != 'unknown':
add_quantity(name, 'claimedtype', row[7].replace('SNe', '').replace('SN', '').strip(), source)
elif (True in [x in row[9].upper() for x in ['SN CANDIATE', 'CANDIDATE SN', 'HOSTLESS SN']]):
add_quantity(name, 'claimedtype', 'Candidate', source)
if 'aka' in row[9].replace('gakaxy','galaxy').lower() and 'AKARI' not in row[9]:
commentsplit = (row[9].replace('_', ' ').replace('MLS ', 'MLS').replace('CSS ', 'CSS').
replace('SN iPTF', 'iPTF').replace('SN ', 'SN').replace('AT ', 'AT').split())
for csi, cs in enumerate(commentsplit):
if 'aka' in cs.lower() and csi < len(commentsplit) - 1:
alias = commentsplit[csi+1].strip('(),:.').replace('PSNJ', 'PSN J')
if alias[:6] == 'ASASSN' and alias[6] != '-':
alias = 'ASASSN-' + alias[6:]
add_quantity(name, 'alias', alias, source)
break
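            # Fetch the per-alert light curve CSV, using the cached copy unless a full refresh was requested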
fname = '../sne-external/GAIA/' + row[0] + '.csv'
if not args.fullrefresh and archived_task('gaia') and os.path.isfile(fname):
with open(fname, 'r') as f:
csvtxt = f.read()
else:
response = urllib.request.urlopen("http://gsaweb.ast.cam.ac.uk/alerts/alert/" + row[0] + "/lightcurve.csv")
with open(fname, 'w') as f:
csvtxt = response.read().decode('utf-8')
f.write(csvtxt)
tsvin2 = csv.reader(csvtxt.splitlines())
for ri2, row2 in enumerate(tsvin2):
if ri2 <= 1 or not row2:
continue
mjd = str(jd_to_mjd(Decimal(row2[1].strip())))
magnitude = row2[2].strip()
if magnitude == 'null':
continue
e_magnitude = 0.
telescope = 'GAIA'
band = 'G'
add_photometry(name, time = mjd, telescope = telescope, band = band, magnitude = magnitude, e_magnitude = e_magnitude, source = source)
if args.update:
journal_events()
journal_events()
# Import CSP
# VizieR catalogs exist for this: J/AJ/139/519, J/AJ/142/156. Should replace eventually.
if do_task(task, 'csp'):
cspbands = ['u', 'B', 'V', 'g', 'r', 'i', 'Y', 'J', 'H', 'K']
for fname in tq(sorted(glob("../sne-external/CSP/*.dat"), key=lambda s: s.lower()), currenttask):
f = open(fname,'r')
tsvin = csv.reader(f, delimiter='\t', skipinitialspace=True)
eventname = os.path.basename(os.path.splitext(fname)[0])
eventparts = eventname.split('opt+')
name = snname(eventparts[0])
name = add_event(name)
reference = "Carnegie Supernova Project"
refbib = "2010AJ....139..519C"
refurl = "http://csp.obs.carnegiescience.edu/data"
source = add_source(name, bibcode = refbib, refname = reference, url = refurl)
add_quantity(name, 'alias', name, source)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, source)
for r, row in enumerate(tsvin):
if len(row) > 0 and row[0][0] == "#":
if r == 2:
add_quantity(name, 'redshift', row[0].split(' ')[-1], source, kind = 'cmb')
add_quantity(name, 'ra', row[1].split(' ')[-1], source)
add_quantity(name, 'dec', row[2].split(' ')[-1], source)
continue
for v, val in enumerate(row):
if v == 0:
mjd = val
elif v % 2 != 0:
if float(row[v]) < 90.0:
add_photometry(name, time = mjd, observatory = 'LCO', band = cspbands[(v-1)//2],
system = 'CSP', magnitude = row[v], e_magnitude = row[v+1], source = source)
f.close()
journal_events()
# Import ITEP
if do_task(task, 'itep'):
itepbadsources = ['2004ApJ...602..571B']
needsbib = []
with open("../sne-external/itep-refs.txt",'r') as f:
refrep = f.read().splitlines()
refrepf = dict(list(zip(refrep[1::2], refrep[::2])))
f = open("../sne-external/itep-lc-cat-28dec2015.txt",'r')
tsvin = csv.reader(f, delimiter='|', skipinitialspace=True)
curname = ''
for r, row in enumerate(tq(tsvin, currenttask)):
if r <= 1 or len(row) < 7:
continue
name = 'SN' + row[0].strip()
mjd = str(jd_to_mjd(Decimal(row[1].strip())))
band = row[2].strip()
magnitude = row[3].strip()
e_magnitude = row[4].strip()
reference = row[6].strip().strip(',')
if curname != name:
curname = name
name = add_event(name)
secondaryreference = "Sternberg Astronomical Institute Supernova Light Curve Catalogue"
secondaryrefurl = "http://dau.itep.ru/sn/node/72"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, secondarysource)
if reference in refrepf:
bibcode = unescape(refrepf[reference])
source = add_source(name, bibcode = bibcode)
            else:
                bibcode = ''
                needsbib.append(reference)
                source = add_source(name, refname = reference) if reference else ''
if bibcode not in itepbadsources:
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, source = secondarysource + ',' + source)
f.close()
# Write out references that could use a bibcode
needsbib = list(OrderedDict.fromkeys(needsbib))
with open('../itep-needsbib.txt', 'w') as f:
f.writelines(["%s\n" % i for i in needsbib])
journal_events()
# Now import the Asiago catalog
if do_task(task, 'asiago'):
#response = urllib.request.urlopen('http://graspa.oapd.inaf.it/cgi-bin/sncat.php')
path = os.path.abspath('../sne-external/asiago-cat.php')
response = urllib.request.urlopen('file://' + path)
html = response.read().decode('utf-8')
html = html.replace("\r", "")
soup = BeautifulSoup(html, "html5lib")
table = soup.find("table")
records = []
for r, row in enumerate(table.findAll('tr')):
if r == 0:
continue
col = row.findAll('td')
records.append([utf8(x.renderContents()) for x in col])
for record in tq(records, currenttask):
if len(record) > 1 and record[1] != '':
name = snname("SN" + record[1]).strip('?')
name = add_event(name)
reference = 'Asiago Supernova Catalogue'
refurl = 'http://graspa.oapd.inaf.it/cgi-bin/sncat.php'
refbib = '1989A&AS...81..421B'
source = add_source(name, refname = reference, url = refurl, bibcode = refbib, secondary = True)
add_quantity(name, 'alias', name, source)
year = re.findall(r'\d+', name)[0]
add_quantity(name, 'discoverdate', year, source)
hostname = record[2]
hostra = record[3]
hostdec = record[4]
ra = record[5].strip(':')
dec = record[6].strip(':')
redvel = record[11].strip(':')
discoverer = record[19]
datestring = year
monthday = record[18]
if "*" in monthday:
datekey = 'discover'
else:
datekey = 'max'
if monthday.strip() != '':
monthstr = ''.join(re.findall("[a-zA-Z]+", monthday))
monthstr = str(list(calendar.month_abbr).index(monthstr))
datestring = datestring + '/' + monthstr
dayarr = re.findall(r'\d+', monthday)
if dayarr:
daystr = dayarr[0]
datestring = datestring + '/' + daystr
add_quantity(name, datekey + 'date', datestring, source)
velocity = ''
redshift = ''
if redvel != '':
if round(float(redvel)) == float(redvel):
velocity = int(redvel)
else:
redshift = float(redvel)
redshift = str(redshift)
velocity = str(velocity)
claimedtype = record[17].replace(':', '').replace('*', '').strip()
if (hostname != ''):
add_quantity(name, 'host', hostname, source)
if (claimedtype != ''):
add_quantity(name, 'claimedtype', claimedtype, source)
if (redshift != ''):
add_quantity(name, 'redshift', redshift, source, kind = 'host')
if (velocity != ''):
add_quantity(name, 'velocity', velocity, source, kind = 'host')
if (hostra != ''):
add_quantity(name, 'hostra', hostra, source, unit = 'nospace')
if (hostdec != ''):
add_quantity(name, 'hostdec', hostdec, source, unit = 'nospace')
if (ra != ''):
add_quantity(name, 'ra', ra, source, unit = 'nospace')
if (dec != ''):
add_quantity(name, 'dec', dec, source, unit = 'nospace')
if (discoverer != ''):
add_quantity(name, 'discoverer', discoverer, source)
journal_events()
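	# Import the Lennarz et al. supernova catalog from VizieR (J/A+A/538/A120, 2012A&A...538A.120L).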
if do_task(task, 'lennarz'):
Vizier.ROW_LIMIT = -1
result = Vizier.get_catalogs("J/A+A/538/A120/usc")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
bibcode = "2012A&A...538A.120L"
for row in tq(table, currenttask):
row = convert_aq_output(row)
name = 'SN' + row['SN']
name = add_event(name)
source = add_source(name, bibcode = bibcode)
add_quantity(name, 'alias', name, source)
if row['RAJ2000']:
add_quantity(name, 'ra', row['RAJ2000'], source)
if row['DEJ2000']:
add_quantity(name, 'dec', row['DEJ2000'], source)
if row['RAG']:
add_quantity(name, 'hostra', row['RAG'], source)
if row['DEG']:
add_quantity(name, 'hostdec', row['DEG'], source)
if row['Gal']:
add_quantity(name, 'host', row['Gal'], source)
if row['Type']:
claimedtypes = row['Type'].split('|')
for claimedtype in claimedtypes:
add_quantity(name, 'claimedtype', claimedtype.strip(' -'), source)
if row['z']:
if name not in ['SN1985D', 'SN2004cq']:
add_quantity(name, 'redshift', row['z'], source, kind = 'host')
if row['Dist']:
if row['e_Dist']:
add_quantity(name, 'lumdist', row['Dist'], source, error = row['e_Dist'], kind = 'host')
else:
add_quantity(name, 'lumdist', row['Dist'], source, kind = 'host')
if row['Ddate']:
datestring = row['Ddate'].replace('-', '/')
add_quantity(name, 'discoverdate', datestring, source)
if 'photometry' not in events[name]:
if 'Dmag' in row and is_number(row['Dmag']) and not isnan(float(row['Dmag'])):
datesplit = row['Ddate'].strip().split('-')
if len(datesplit) == 3:
datestr = row['Ddate'].strip()
elif len(datesplit) == 2:
datestr = row['Ddate'].strip() + '-01'
elif len(datesplit) == 1:
datestr = row['Ddate'].strip() + '-01-01'
mjd = str(astrotime(datestr).mjd)
add_photometry(name, time = mjd, band = row['Dband'], magnitude = row['Dmag'], source = source)
if row['Mdate']:
datestring = row['Mdate'].replace('-', '/')
add_quantity(name, 'maxdate', datestring, source)
if 'photometry' not in events[name]:
					if 'Mmag' in row and is_number(row['Mmag']) and not isnan(float(row['Mmag'])):
datesplit = row['Mdate'].strip().split('-')
if len(datesplit) == 3:
datestr = row['Mdate'].strip()
elif len(datesplit) == 2:
datestr = row['Mdate'].strip() + '-01'
elif len(datesplit) == 1:
datestr = row['Mdate'].strip() + '-01-01'
mjd = str(astrotime(datestr).mjd)
add_photometry(name, time = mjd, band = row['Mband'], magnitude = row['Mmag'], source = source)
journal_events()
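	# Import the Fermi '1SC' supernova remnant catalog (2016ApJS..224....8A).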
if do_task(task, 'fermi'):
with open("../sne-external/1SC_catalog_v01.asc", 'r') as f:
tsvin = csv.reader(f, delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask)):
if row[0].startswith('#'):
if len(row) > 1 and 'UPPER_LIMITS' in row[1]:
break
continue
if 'Classified' not in row[1]:
continue
name = row[0].replace('SNR', 'G')
name = add_event(name)
source = add_source(name, bibcode = '2016ApJS..224....8A')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'alias', row[0].replace('SNR', 'MWSNR'), source)
add_quantity(name, 'ra', row[2], source, unit = 'floatdegrees')
add_quantity(name, 'dec', row[3], source, unit = 'floatdegrees')
journal_events()
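	# Import the Transient Name Server (TNS) index, fetched in pages of 1000 entries.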
if do_task(task, 'tns'):
session = requests.Session()
csvtxt = load_cached_url("https://wis-tns.weizmann.ac.il/search?&num_page=1&format=html&sort=desc&order=id&format=csv&page=0",
"../sne-external/TNS/index.csv")
if not csvtxt:
continue
maxid = csvtxt.splitlines()[1].split(",")[0].strip('"')
maxpages = ceil(int(maxid)/1000.)
for page in tq(range(maxpages), currenttask):
fname = '../sne-external/TNS/page-' + str(page).zfill(2) + '.csv'
if archived_task('tns') and os.path.isfile(fname) and page < 7:
with open(fname, 'r') as f:
csvtxt = f.read()
else:
with open(fname, 'w') as f:
session = requests.Session()
response = session.get("https://wis-tns.weizmann.ac.il/search?&num_page=1000&format=html&edit[type]=&edit[objname]=&edit[id]=&sort=asc&order=id&display[redshift]=1&display[hostname]=1&display[host_redshift]=1&display[source_group_name]=1&display[programs_name]=1&display[internal_name]=1&display[isTNS_AT]=1&display[public]=1&display[end_pop_period]=0&display[spectra_count]=1&display[discoverymag]=1&display[discmagfilter]=1&display[discoverydate]=1&display[discoverer]=1&display[sources]=1&display[bibcode]=1&format=csv&page=" + str(page))
csvtxt = response.text
f.write(csvtxt)
tsvin = csv.reader(csvtxt.splitlines(), delimiter=',')
for ri, row in enumerate(tq(tsvin, currenttask, leave = False)):
if ri == 0:
continue
if row[4] and 'SN' not in row[4]:
continue
name = row[1].replace(' ', '')
name = add_event(name)
source = add_source(name, refname = 'Transient Name Server', url = 'https://wis-tns.weizmann.ac.il')
add_quantity(name, 'alias', name, source)
if row[2] and row[2] != '00:00:00.00':
add_quantity(name, 'ra', row[2], source)
if row[3] and row[3] != '+00:00:00.00':
add_quantity(name, 'dec', row[3], source)
if row[4]:
add_quantity(name, 'claimedtype', row[4].replace('SN', '').strip(), source)
if row[5]:
add_quantity(name, 'redshift', row[5], source, kind = 'spectroscopic')
if row[6]:
add_quantity(name, 'host', row[6], source)
if row[7]:
add_quantity(name, 'redshift', row[7], source, kind = 'host')
if row[8]:
add_quantity(name, 'discoverer', row[8], source)
				# Currently, all events list all possible observers. TNS bug?
#if row[9]:
# observers = row[9].split(',')
# for observer in observers:
# add_quantity(name, 'observer', observer.strip(), source)
if row[10]:
add_quantity(name, 'alias', row[10], source)
if row[8] and row[14] and row[15] and row[16]:
survey = row[8]
magnitude = row[14]
band = row[15].split('-')[0]
mjd = astrotime(row[16]).mjd
add_photometry(name, time = mjd, magnitude = magnitude, band = band, survey = survey, source = source)
if row[16]:
date = row[16].split()[0].replace('-', '/')
if date != '0000/00/00':
date = date.replace('/00', '')
time = row[16].split()[1]
if time != '00:00:00':
ts = time.split(':')
date += pretty_num(timedelta(hours = int(ts[0]), minutes = int(ts[1]), seconds = int(ts[2])).total_seconds()/(24*60*60), sig=6).lstrip('0')
add_quantity(name, 'discoverdate', date, source)
if args.update:
journal_events()
journal_events()
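	# Import the "Latest Supernovae" pages from rochesterastronomy.org.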
if do_task(task, 'rochester'):
rochesterpaths = ['http://www.rochesterastronomy.org/snimages/snredshiftall.html', 'http://www.rochesterastronomy.org/sn2016/snredshift.html']
rochesterupdate = [False, True]
for p, path in enumerate(tq(rochesterpaths, currenttask)):
if args.update and not rochesterupdate[p]:
continue
filepath = '../sne-external/rochester/' + os.path.basename(path)
html = load_cached_url(path, filepath)
if not html:
continue
soup = BeautifulSoup(html, "html5lib")
rows = soup.findAll('tr')
secondaryreference = "Latest Supernovae"
secondaryrefurl = "http://www.rochesterastronomy.org/snimages/snredshiftall.html"
for r, row in enumerate(tq(rows, currenttask)):
if r == 0:
continue
cols = row.findAll('td')
if not len(cols):
continue
name = ''
if cols[14].contents:
aka = str(cols[14].contents[0]).strip()
if is_number(aka.strip('?')):
aka = 'SN' + aka.strip('?') + 'A'
name = add_event(aka)
elif len(aka) >= 4 and is_number(aka[:4]):
aka = 'SN' + aka
name = add_event(aka)
ra = str(cols[3].contents[0]).strip()
dec = str(cols[4].contents[0]).strip()
sn = re.sub('<[^<]+?>', '', str(cols[0].contents[0])).strip()
if is_number(sn.strip('?')):
sn = 'SN' + sn.strip('?') + 'A'
elif len(sn) >= 4 and is_number(sn[:4]):
sn = 'SN' + sn
if not name:
if not sn:
continue
if sn[:8] == 'MASTER J':
sn = sn.replace('MASTER J', 'MASTER OT J').replace('SNHunt', 'SNhunt')
if 'POSSIBLE' in sn.upper() and ra and dec:
sn = 'PSN J' + ra.replace(':', '').replace('.', '') + dec.replace(':', '').replace('.', '')
name = add_event(sn)
reference = cols[12].findAll('a')[0].contents[0].strip()
refurl = cols[12].findAll('a')[0]['href'].strip()
source = add_source(name, refname = reference, url = refurl)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
sources = uniq_cdl(list(filter(None, [source, secondarysource])))
add_quantity(name, 'alias', name, sources)
add_quantity(name, 'alias', sn, sources)
if cols[14].contents:
if aka == 'SNR G1.9+0.3':
aka = 'G001.9+00.3'
if aka[:4] == 'PS1 ':
aka = 'PS1-' + aka[4:]
if aka[:8] == 'MASTER J':
aka = aka.replace('MASTER J', 'MASTER OT J').replace('SNHunt', 'SNhunt')
if 'POSSIBLE' in aka.upper() and ra and dec:
aka = 'PSN J' + ra.replace(':', '').replace('.', '') + dec.replace(':', '').replace('.', '')
add_quantity(name, 'alias', aka, sources)
if str(cols[1].contents[0]).strip() != 'unk':
add_quantity(name, 'claimedtype', str(cols[1].contents[0]).strip(' :,'), sources)
if str(cols[2].contents[0]).strip() != 'anonymous':
add_quantity(name, 'host', str(cols[2].contents[0]).strip(), sources)
add_quantity(name, 'ra', ra, sources)
add_quantity(name, 'dec', dec, sources)
if str(cols[6].contents[0]).strip() not in ['2440587', '2440587.292']:
astrot = astrotime(float(str(cols[6].contents[0]).strip()), format='jd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), sources)
if str(cols[7].contents[0]).strip() not in ['2440587', '2440587.292']:
astrot = astrotime(float(str(cols[7].contents[0]).strip()), format='jd')
if (float(str(cols[8].contents[0]).strip()) <= 90.0 and
not any('GRB' in x for x in get_aliases(name))):
add_photometry(name, time = str(astrot.mjd), magnitude = str(cols[8].contents[0]).strip(), source = sources)
if cols[11].contents[0] != 'n/a':
add_quantity(name, 'redshift', str(cols[11].contents[0]).strip(), sources)
add_quantity(name, 'discoverer', str(cols[13].contents[0]).strip(), sources)
if args.update:
journal_events()
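		# When not updating, also parse the VSNET latestsne.dat file for additional photometry.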
if not args.update:
vsnetfiles = ["latestsne.dat"]
for vsnetfile in vsnetfiles:
f = open("../sne-external/" + vsnetfile,'r',encoding='latin1')
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
for r, row in enumerate(tsvin):
if not row or row[0][:4] in ['http', 'www.'] or len(row) < 3:
continue
name = row[0].strip()
if name[:4].isdigit():
name = 'SN' + name
if name.startswith('PSNJ'):
name = 'PSN J' + name[4:]
if name.startswith('MASTEROTJ'):
name = name.replace('MASTEROTJ', 'MASTER OT J')
name = add_event(name)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if not is_number(row[1]):
continue
year = row[1][:4]
month = row[1][4:6]
day = row[1][6:]
if '.' not in day:
day = day[:2] + '.' + day[2:]
mjd = astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day))
magnitude = row[2].rstrip(ascii_letters)
if not is_number(magnitude):
continue
if magnitude.isdigit():
if int(magnitude) > 100:
magnitude = magnitude[:2] + '.' + magnitude[2:]
					if float(magnitude) >= 90.0:
continue
if len(row) >= 4:
if is_number(row[3]):
e_magnitude = row[3]
refind = 4
else:
e_magnitude = ''
refind = 3
if refind >= len(row):
sources = secondarysource
else:
reference = ' '.join(row[refind:])
source = add_source(name, refname = reference)
add_quantity(name, 'alias', name, secondarysource)
sources = uniq_cdl([source,secondarysource])
else:
sources = secondarysource
band = row[2].lstrip('1234567890.')
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, source = sources)
f.close()
journal_events()
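	# Import OGLE-IV transient detections.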
if do_task(task, 'ogle'):
basenames = ['transients', 'transients/2014b', 'transients/2014', 'transients/2013', 'transients/2012']
oglenames = []
ogleupdate = [True, False, False, False, False]
for b, bn in enumerate(tq(basenames, currenttask)):
if args.update and not ogleupdate[b]:
continue
filepath = '../sne-external/OGLE-' + bn.replace('/', '-') + '-transients.html'
htmltxt = load_cached_url('http://ogle.astrouw.edu.pl/ogle4/' + bn + '/transients.html', filepath)
if not htmltxt:
continue
soup = BeautifulSoup(htmltxt, "html5lib")
links = soup.findAll('a')
breaks = soup.findAll('br')
datalinks = []
datafnames = []
for a in links:
if a.has_attr('href'):
if '.dat' in a['href']:
datalinks.append('http://ogle.astrouw.edu.pl/ogle4/' + bn + '/' + a['href'])
datafnames.append(bn.replace('/', '-') + '-' + a['href'].replace('/', '-'))
ec = -1
reference = 'OGLE-IV Transient Detection System'
refurl = 'http://ogle.astrouw.edu.pl/ogle4/transients/transients.html'
for br in tq(breaks, currenttask):
sibling = br.nextSibling
if 'Ra,Dec=' in sibling:
line = sibling.replace("\n", '').split('Ra,Dec=')
name = line[0].strip()
ec += 1
if 'NOVA' in name or 'dupl' in name:
continue
if name in oglenames:
continue
oglenames.append(name)
name = add_event(name)
mySibling = sibling.nextSibling
atelref = ''
claimedtype = ''
while 'Ra,Dec=' not in mySibling:
if isinstance(mySibling, NavigableString):
if 'Phot.class=' in str(mySibling):
claimedtype = re.sub(r'\([^)]*\)', '', str(mySibling).split('=')[-1]).replace('SN','').strip()
if isinstance(mySibling, Tag):
atela = mySibling
if atela and atela.has_attr('href') and 'astronomerstelegram' in atela['href']:
atelref = atela.contents[0].strip()
atelurl = atela['href']
mySibling = mySibling.nextSibling
if mySibling is None:
break
nextSibling = sibling.nextSibling
if isinstance(nextSibling, Tag) and nextSibling.has_attr('alt') and nextSibling.contents[0].strip() != 'NED':
radec = nextSibling.contents[0].strip().split()
else:
radec = line[-1].split()
ra = radec[0]
dec = radec[1]
fname = '../sne-external/OGLE/' + datafnames[ec]
if not args.fullrefresh and archived_task('ogle') and os.path.isfile(fname):
with open(fname, 'r') as f:
csvtxt = f.read()
else:
response = urllib.request.urlopen(datalinks[ec])
with open(fname, 'w') as f:
csvtxt = response.read().decode('utf-8')
f.write(csvtxt)
lcdat = csvtxt.splitlines()
sources = [add_source(name, refname = reference, url = refurl)]
add_quantity(name, 'alias', name, sources[0])
if atelref and atelref != 'ATel#----':
sources.append(add_source(name, refname = atelref, url = atelurl))
sources = uniq_cdl(sources)
if name.startswith('OGLE'):
if name[4] == '-':
if is_number(name[5:9]):
add_quantity(name, 'discoverdate', name[5:9], sources)
else:
if is_number(name[4:6]):
add_quantity(name, 'discoverdate', '20' + name[4:6], sources)
# RA and Dec from OGLE pages currently not reliable
#add_quantity(name, 'ra', ra, sources)
#add_quantity(name, 'dec', dec, sources)
if claimedtype and claimedtype != '-':
add_quantity(name, 'claimedtype', claimedtype, sources)
elif 'SN' not in name and 'claimedtype' not in events[name]:
add_quantity(name, 'claimedtype', 'Candidate', sources)
for row in lcdat:
row = row.split()
mjd = str(jd_to_mjd(Decimal(row[0])))
magnitude = row[1]
if float(magnitude) > 90.0:
continue
e_magnitude = row[2]
upperlimit = False
if e_magnitude == '-1' or float(e_magnitude) > 10.0:
e_magnitude = ''
upperlimit = True
add_photometry(name, time = mjd, band = 'I', magnitude = magnitude, e_magnitude = e_magnitude,
system = 'Vega', source = sources, upperlimit = upperlimit)
if args.update:
journal_events()
journal_events()
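	# Import SNLS photometry, converting the fluxes in SNLS-ugriz.dat to magnitudes.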
if do_task(task, 'snls'):
with open("../sne-external/SNLS-ugriz.dat", 'r') as f:
data = csv.reader(f, delimiter=' ', quotechar='"', skipinitialspace = True)
for row in data:
flux = row[3]
err = row[4]
# Being extra strict here with the flux constraint, see note below.
if float(flux) < 3.0*float(err):
continue
name = 'SNLS-' + row[0]
name = add_event(name)
source = add_source(name, bibcode = '2010A&A...523A...7G')
add_quantity(name, 'alias', name, source)
band = row[1]
mjd = row[2]
sig = get_sig_digits(flux.split('E')[0])+1
# Conversion comes from SNLS-Readme
				# NOTE: Data files available for download suggest zeropoints different from 30; need to inquire.
magnitude = pretty_num(30.0-2.5*log10(float(flux)), sig = sig)
e_magnitude = pretty_num(2.5*log10(1.0 + float(err)/float(flux)), sig = sig)
#e_magnitude = pretty_num(2.5*(log10(float(flux) + float(err)) - log10(float(flux))), sig = sig)
add_photometry(name, time = mjd, band = band, magnitude = magnitude, e_magnitude = e_magnitude, counts = flux,
e_counts = err, source = source)
journal_events()
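	# Import Pan-STARRS 3Pi transients scraped from the QUB psdb pages.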
if do_task(task, 'psthreepi'):
fname = '../sne-external/3pi/page00.html'
html = load_cached_url("http://psweb.mp.qub.ac.uk/ps1threepi/psdb/public/?page=1&sort=followup_flag_date", fname, write = False)
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
div = bs.find('div', {"class":"pagination"})
offline = False
if not div:
offline = True
else:
links = div.findAll('a')
if not links:
offline = True
if offline:
if args.update:
continue
warnings.warn("Pan-STARRS 3pi offline, using local files only.")
with open(fname, 'r') as f:
html = f.read()
bs = BeautifulSoup(html, "html5lib")
div = bs.find('div', {"class":"pagination"})
links = div.findAll('a')
else:
with open(fname, 'w') as f:
f.write(html)
numpages = int(links[-2].contents[0])
oldnumpages = len(glob('../sne-external/3pi/page*'))
for page in tq(range(1,numpages), currenttask):
fname = '../sne-external/3pi/page' + str(page).zfill(2) + '.html'
if not args.fullrefresh and archived_task('psthreepi') and os.path.isfile(fname) and page < oldnumpages:
with open(fname, 'r') as f:
html = f.read()
elif not offline:
response = urllib.request.urlopen("http://psweb.mp.qub.ac.uk/ps1threepi/psdb/public/?page=" + str(page) + "&sort=followup_flag_date")
with open(fname, 'w') as f:
html = response.read().decode('utf-8')
f.write(html)
else:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.findAll('tr')
for tr in tq(trs, currenttask):
tds = tr.findAll('td')
if not tds:
continue
refs = []
aliases = []
ttype = ''
ctype = ''
for tdi, td in enumerate(tds):
if tdi == 0:
psname = td.contents[0]
pslink = psname['href']
psname = psname.text
elif tdi == 1:
ra = td.contents[0]
elif tdi == 2:
dec = td.contents[0]
elif tdi == 3:
ttype = td.contents[0]
if ttype != 'sn' and ttype != 'orphan':
break
elif tdi == 5:
if not td.contents:
continue
ctype = td.contents[0]
if ctype == 'Observed':
ctype = ''
elif tdi == 16:
if td.contents:
crossrefs = td.findAll('a')
for cref in crossrefs:
if 'atel' in cref.contents[0].lower():
refs.append([cref.contents[0], cref['href']])
elif is_number(cref.contents[0][:4]):
continue
else:
aliases.append(cref.contents[0])
if ttype != 'sn' and ttype != 'orphan':
continue
name = ''
for alias in aliases:
if alias[:2] == 'SN':
name = alias
if not name:
name = psname
name = add_event(name)
sources = [add_source(name, refname = 'Pan-STARRS 3Pi', url = 'http://psweb.mp.qub.ac.uk/ps1threepi/psdb/')]
add_quantity(name, 'alias', name, sources[0])
for ref in refs:
sources.append(add_source(name, refname = ref[0], url = ref[1]))
source = uniq_cdl(sources)
for alias in aliases:
newalias = alias
if alias[:3] in ['CSS', 'SSS', 'MLS']:
newalias = alias.replace('-', ':', 1)
newalias = newalias.replace('PSNJ', 'PSN J')
add_quantity(name, 'alias', newalias, source)
add_quantity(name, 'ra', ra, source)
add_quantity(name, 'dec', dec, source)
add_quantity(name, 'claimedtype', ctype, source)
fname2 = '../sne-external/3pi/candidate-' + pslink.rstrip('/').split('/')[-1] + '.html'
if archived_task('psthreepi') and os.path.isfile(fname2):
with open(fname2, 'r') as f:
html2 = f.read()
elif not offline:
pslink = 'http://psweb.mp.qub.ac.uk/ps1threepi/psdb/public/' + pslink
with open(fname2, 'w') as f:
response2 = urllib.request.urlopen(pslink)
html2 = response2.read().decode('utf-8')
f.write(html2)
else:
continue
bs2 = BeautifulSoup(html2, "html5lib")
scripts = bs2.findAll('script')
nslines = []
nslabels = []
for script in scripts:
if 'jslcdata.push' not in script.text:
continue
slines = script.text.splitlines()
for line in slines:
if 'jslcdata.push' in line:
nslines.append(json.loads(line.strip().replace('jslcdata.push(','').replace(');','')))
if 'jslabels.push' in line and 'blanks' not in line and 'non det' not in line:
nslabels.append(json.loads(line.strip().replace('jslabels.push(','').replace(');',''))['label'])
for li, line in enumerate(nslines[:len(nslabels)]):
if not line:
continue
for obs in line:
add_photometry(name, time = str(obs[0]), band = nslabels[li], magnitude = str(obs[1]), e_magnitude = str(obs[2]), source = source,
telescope = 'Pan-STARRS1')
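				# The first len(nslabels) light-curve arrays are detections; arrays beyond 2*len(nslabels) are upper limits.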
for li, line in enumerate(nslines[2*len(nslabels):]):
if not line:
continue
for obs in line:
add_photometry(name, time = str(obs[0]), band = nslabels[li], magnitude = str(obs[1]), upperlimit = True, source = source,
telescope = 'Pan-STARRS1')
assoctab = bs2.find('table', {"class":"generictable"})
hostname = ''
redshift = ''
if assoctab:
trs = assoctab.findAll('tr')
					headertds = [x.contents[0] for x in trs[0].findAll('td')]
tds = trs[1].findAll('td')
for tdi, td in enumerate(tds):
if tdi == 1:
hostname = td.contents[0].strip()
elif tdi == 4:
if 'z' in headertds:
redshift = td.contents[0].strip()
# Skip galaxies with just SDSS id
if is_number(hostname):
continue
add_quantity(name, 'host', hostname, source)
if redshift:
add_quantity(name, 'redshift', redshift, source, kind = 'host')
if args.update:
journal_events()
journal_events()
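	# Import Pan-STARRS Medium Deep Survey events from the machine-readable table of 2015ApJ...799..208S.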
if do_task(task, 'psmds'):
with open('../sne-external/MDS/apj506838t1_mrt.txt') as f:
for ri, row in enumerate(tq(f.read().splitlines(), currenttask)):
if ri < 35:
continue
cols = [x.strip() for x in row.split(',')]
name = add_event(cols[0])
source = add_source(name, bibcode = '2015ApJ...799..208S')
add_quantity(name, 'alias', name, source)
add_quantity(name, 'ra', cols[2], source)
add_quantity(name, 'dec', cols[3], source)
astrot = astrotime(float(cols[4]), format='mjd').datetime
add_quantity(name, 'discoverdate', make_date_string(astrot.year, astrot.month, astrot.day), source)
add_quantity(name, 'redshift', cols[5], source, kind = 'spectroscopic')
add_quantity(name, 'claimedtype', 'II P', source)
journal_events()
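	# Import Catalina Real-time Transient Survey (CRTS) candidates from the catalina, MLS, and SSS pages.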
if do_task(task, 'crts'):
crtsnameerrors = ['2011ax']
folders = ["catalina", "MLS", "SSS"]
for fold in tq(folders, currenttask):
html = load_cached_url("http://nesssi.cacr.caltech.edu/" + fold + "/AllSN.html", '../sne-external/CRTS/' + fold + '.html')
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.findAll('tr')
for tr in tq(trs, currenttask):
tds = tr.findAll('td')
if not tds:
continue
refs = []
aliases = []
ttype = ''
ctype = ''
for tdi, td in enumerate(tds):
if tdi == 0:
crtsname = td.contents[0].text.strip()
elif tdi == 1:
ra = td.contents[0]
elif tdi == 2:
dec = td.contents[0]
elif tdi == 11:
lclink = td.find('a')['onclick']
lclink = lclink.split("'")[1]
elif tdi == 13:
aliases = re.sub('[()]', '', re.sub('<[^<]+?>', '', td.contents[-1].strip()))
aliases = [x.strip('; ') for x in list(filter(None, aliases.split(' ')))]
name = ''
hostmag = ''
hostupper = False
validaliases = []
for ai, alias in enumerate(aliases):
if alias in ['SN', 'SDSS']:
continue
if alias in crtsnameerrors:
continue
if alias == 'mag':
if ai < len(aliases) - 1:
ind = ai+1
if aliases[ai+1] in ['SDSS']:
ind = ai+2
elif aliases[ai+1] in ['gal', 'obj', 'object', 'source']:
ind = ai-1
if '>' in aliases[ind]:
hostupper = True
hostmag = aliases[ind].strip('>~').replace(',', '.')
continue
if is_number(alias[:4]) and alias[:2] == '20' and len(alias) > 4:
name = 'SN' + alias
lalias = alias.lower()
if (('asassn' in alias and len(alias) > 6) or ('ptf' in alias and len(alias) > 3) or
('ps1' in alias and len(alias) > 3) or 'snhunt' in alias or
('mls' in alias and len(alias) > 3) or 'gaia' in alias or ('lsq' in alias and len(alias) > 3)):
alias = alias.replace('SNHunt', 'SNhunt')
validaliases.append(alias)
if not name:
name = crtsname
name = add_event(name)
source = add_source(name, refname = 'Catalina Sky Survey', bibcode = '2009ApJ...696..870D',
url = 'http://nesssi.cacr.caltech.edu/catalina/AllSN.html')
add_quantity(name, 'alias', name, source)
for alias in validaliases:
add_quantity(name, 'alias', alias, source)
add_quantity(name, 'ra', ra, source, unit = 'floatdegrees')
add_quantity(name, 'dec', dec, source, unit = 'floatdegrees')
if hostmag:
					# 1.0 magnitude error based on the Drake et al. (2009) assertion that SNe are only considered real if they are 2 mags brighter than their host.
add_photometry(name, band = 'C', magnitude = hostmag, e_magnitude = 1.0, source = source, host = True,
telescope = 'Catalina Schmidt', upperlimit = hostupper)
fname2 = '../sne-external/' + fold + '/' + lclink.split('.')[-2].rstrip('p').split('/')[-1] + '.html'
if not args.fullrefresh and archived_task('crts') and os.path.isfile(fname2):
with open(fname2, 'r') as f:
html2 = f.read()
else:
with open(fname2, 'w') as f:
response2 = urllib.request.urlopen(lclink)
html2 = response2.read().decode('utf-8')
f.write(html2)
lines = html2.splitlines()
for line in lines:
if 'javascript:showx' in line:
mjdstr = re.search("showx\('(.*?)'\)", line).group(1).split('(')[0].strip()
if not is_number(mjdstr):
continue
mjd = str(Decimal(mjdstr) + Decimal(53249.0))
else:
continue
if 'javascript:showy' in line:
mag = re.search("showy\('(.*?)'\)", line).group(1)
if 'javascript:showz' in line:
err = re.search("showz\('(.*?)'\)", line).group(1)
add_photometry(name, time = mjd, band = 'C', magnitude = mag, source = source, includeshost = True,
telescope = 'Catalina Schmidt', e_magnitude = err if float(err) > 0.0 else '', upperlimit = (float(err) == 0.0))
if args.update:
journal_events()
journal_events()
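	# Import CRTS Supernova Hunt discoveries.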
if do_task(task, 'snhunt'):
html = load_cached_url('http://nesssi.cacr.caltech.edu/catalina/current.html', '../sne-external/SNhunt/current.html')
if not html:
continue
text = html.splitlines()
findtable = False
for ri, row in enumerate(text):
if 'Supernova Discoveries' in row:
findtable = True
if findtable and '<table' in row:
tstart = ri+1
if findtable and '</table>' in row:
tend = ri-1
tablestr = '<html><body><table>'
for row in text[tstart:tend]:
if row[:3] == 'tr>':
tablestr = tablestr + '<tr>' + row[3:]
else:
tablestr = tablestr + row
tablestr = tablestr + '</table></body></html>'
bs = BeautifulSoup(tablestr, 'html5lib')
trs = bs.find('table').findAll('tr')
for tr in tq(trs, currenttask):
cols = [str(x.text) for x in tr.findAll('td')]
if not cols:
continue
name = re.sub('<[^<]+?>', '', cols[4]).strip().replace(' ', '').replace('SNHunt', 'SNhunt')
name = add_event(name)
source = add_source(name, refname = 'Supernova Hunt', url = 'http://nesssi.cacr.caltech.edu/catalina/current.html')
add_quantity(name, 'alias', name, source)
host = re.sub('<[^<]+?>', '', cols[1]).strip().replace('_', ' ')
add_quantity(name, 'host', host, source)
add_quantity(name, 'ra', cols[2], source, unit = 'floatdegrees')
add_quantity(name, 'dec', cols[3], source, unit = 'floatdegrees')
dd = cols[0]
discoverdate = dd[:4] + '/' + dd[4:6] + '/' + dd[6:8]
add_quantity(name, 'discoverdate', discoverdate, source)
discoverers = cols[5].split('/')
for discoverer in discoverers:
add_quantity(name, 'discoverer', 'CRTS', source)
add_quantity(name, 'discoverer', discoverer, source)
if args.update:
journal_events()
journal_events()
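	# Import NED-D distances and build the nedddict host-distance lookup.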
if do_task(task, 'nedd'):
f = open("../sne-external/NED25.12.1-D-10.4.0-20151123.csv", 'r')
data = csv.reader(f, delimiter=',', quotechar='"')
reference = "NED-D"
refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
nedddict = OrderedDict()
oldhostname = ''
for r, row in enumerate(data):
if r <= 12:
continue
hostname = row[3]
if args.update and oldhostname != hostname:
journal_events()
distmod = row[4]
moderr = row[5]
dist = row[6]
bibcode = unescape(row[8])
name = ''
if hostname.startswith('SN '):
if is_number(hostname[3:7]):
name = 'SN' + hostname[3:]
else:
name = hostname[3:]
elif hostname.startswith('SNLS '):
name = 'SNLS-' + hostname[5:].split()[0]
else:
cleanhost = hostname.replace('MESSIER 0', 'M').replace('MESSIER ', 'M').strip()
if True in [x in cleanhost for x in ['UGC', 'PGC', 'IC']]:
cleanhost = ' '.join([x.lstrip('0') for x in cleanhost.split()])
if 'ESO' in cleanhost:
cleanhost = cleanhost.replace(' ', '').replace('ESO', 'ESO ')
nedddict.setdefault(cleanhost,[]).append(Decimal(dist))
if name:
name = add_event(name)
secondarysource = add_source(name, refname = reference, url = refurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if bibcode:
source = add_source(name, bibcode = bibcode)
sources = uniq_cdl([source, secondarysource])
else:
sources = secondarysource
add_quantity(name, 'comovingdist', dist, sources)
oldhostname = hostname
journal_events()
# Import CPCS
if do_task(task, 'cpcs'):
jsontxt = load_cached_url("http://gsaweb.ast.cam.ac.uk/followup/list_of_alerts?format=json&num=100000&published=1&observed_only=1&hashtag=JG_530ad9462a0b8785bfb385614bf178c6",
"../sne-external/CPCS/index.json")
if not jsontxt:
continue
alertindex = json.loads(jsontxt, object_pairs_hook=OrderedDict)
ids = [x["id"] for x in alertindex]
for i, ai in enumerate(tq(ids, currenttask)):
name = alertindex[i]['ivorn'].split('/')[-1].strip()
# Skip a few weird entries
if name == 'ASASSNli':
continue
# Just use a whitelist for now since naming seems inconsistent
if True in [x in name.upper() for x in ['GAIA', 'OGLE', 'ASASSN', 'MASTER', 'OTJ', 'PS1', 'IPTF']]:
name = name.replace('Verif', '').replace('_', ' ')
if 'ASASSN' in name and name[6] != '-':
name = 'ASASSN-' + name[6:]
if 'MASTEROTJ' in name:
name = name.replace('MASTEROTJ', 'MASTER OT J')
if 'OTJ' in name:
name = name.replace('OTJ', 'MASTER OT J')
if name.upper().startswith('IPTF'):
name = 'iPTF' + name[4:]
				# Only add CPCS alerts for events that already exist in the catalog (i.e. have been classified as SNe elsewhere).
				if not event_exists(name):
continue
name = add_event(name)
else:
continue
secondarysource = add_source(name, refname = 'Cambridge Photometric Calibration Server', url = 'http://gsaweb.ast.cam.ac.uk/followup/', secondary = True)
add_quantity(name, 'alias', name, secondarysource)
add_quantity(name, 'ra', str(alertindex[i]['ra']), secondarysource, unit = 'floatdegrees')
add_quantity(name, 'dec', str(alertindex[i]['dec']), secondarysource, unit = 'floatdegrees')
alerturl = "http://gsaweb.ast.cam.ac.uk/followup/get_alert_lc_data?alert_id=" + str(ai)
source = add_source(name, refname = 'CPCS Alert ' + str(ai), url = alerturl)
fname = '../sne-external/CPCS/alert-' + str(ai).zfill(2) + '.json'
if archived_task('cpcs') and os.path.isfile(fname):
with open(fname, 'r') as f:
jsonstr = f.read()
else:
session = requests.Session()
response = session.get(alerturl + "&hashtag=JG_530ad9462a0b8785bfb385614bf178c6")
with open(fname, 'w') as f:
jsonstr = response.text
f.write(jsonstr)
try:
cpcsalert = json.loads(jsonstr)
except:
continue
mjds = [round_sig(x, sig=9) for x in cpcsalert['mjd']]
mags = [round_sig(x, sig=6) for x in cpcsalert['mag']]
errs = [round_sig(x, sig=6) if (is_number(x) and float(x) > 0.0) else '' for x in cpcsalert['magerr']]
bnds = cpcsalert['filter']
obs = cpcsalert['observatory']
for mi, mjd in enumerate(mjds):
add_photometry(name, time = mjd, magnitude = mags[mi], e_magnitude = errs[mi],
band = bnds[mi], observatory = obs[mi], source = uniq_cdl([source,secondarysource]))
if args.update:
journal_events()
journal_events()
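	# Import PTF/iPTF events from the WISeREP object list, the old PTF event list, and the perly-2016.csv SLSN table (2016arXiv160408207P).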
if do_task(task, 'ptf'):
#response = urllib.request.urlopen("http://wiserep.weizmann.ac.il/objects/list")
#bs = BeautifulSoup(response, "html5lib")
#select = bs.find('select', {"name":"objid"})
#options = select.findAll('option')
#for option in options:
# print(option.text)
# name = option.text
# if ((name.startswith('PTF') and is_number(name[3:5])) or
# name.startswith('PTFS') or name.startswith('iPTF')):
# name = add_event(name)
if archived_task('ptf'):
with open('../sne-external/PTF/update.html', 'r') as f:
html = f.read()
else:
session = requests.Session()
response = session.get("http://wiserep.weizmann.ac.il/spectra/update")
html = response.text
with open('../sne-external/PTF/update.html', 'w') as f:
f.write(html)
bs = BeautifulSoup(html, "html5lib")
select = bs.find('select', {"name":"objid"})
options = select.findAll('option')
for option in options:
name = option.text
if ((name.startswith('PTF') and is_number(name[3:5])) or
name.startswith('PTFS') or name.startswith('iPTF')):
if '(' in name:
alias = name.split('(')[0].strip(' ')
name = name.split('(')[-1].strip(') ').replace('sn', 'SN')
name = add_event(name)
source = add_source(name, bibcode = '2012PASP..124..668Y')
add_quantity(name, 'alias', alias, source)
else:
name = add_event(name)
with open('../sne-external/PTF/old-ptf-events.csv') as f:
for suffix in f.read().splitlines():
name = add_event('PTF' + suffix)
with open('../sne-external/PTF/perly-2016.csv') as f:
for row in f.read().splitlines():
cols = [x.strip() for x in row.split(',')]
alias = ''
if cols[8]:
name = cols[8]
alias = 'PTF' + cols[0]
else:
name = 'PTF' + cols[0]
name = add_event(name)
source = add_source(name, bibcode = '2016arXiv160408207P')
add_quantity(name, 'alias', name, source)
if alias:
add_quantity(name, 'alias', alias, source)
add_quantity(name, 'ra', cols[1], source)
add_quantity(name, 'dec', cols[2], source)
add_quantity(name, 'claimedtype', 'SLSN-' + cols[3], source)
add_quantity(name, 'redshift', cols[4], source, kind = 'spectroscopic')
maxdate = cols[6].replace('-', '/')
add_quantity(name, 'maxdate', maxdate.lstrip('<'), source, upperlimit = maxdate.startswith('<'))
add_quantity(name, 'ebv', cols[7], source, kind = 'spectroscopic')
journal_events()
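	# Import DES bright transients from the NERSC portal.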
if do_task(task, 'des'):
html = load_cached_url("https://portal.nersc.gov/des-sn/transients/", "../sne-external/DES/transients.html")
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.find('tbody').findAll('tr')
for tri, tr in enumerate(tq(trs, currenttask)):
name = ''
source = ''
if tri == 0:
continue
tds = tr.findAll('td')
for tdi, td in enumerate(tds):
if tdi == 0:
name = add_event(td.text.strip())
if tdi == 1:
(ra, dec) = [x.strip() for x in td.text.split('\xa0')]
if tdi == 6:
atellink = td.find('a')
if atellink:
atellink = atellink['href']
else:
atellink = ''
sources = [add_source(name, url = 'https://portal.nersc.gov/des-sn/', refname = 'DES Bright Transients',
acknowledgment = 'http://www.noao.edu/noao/library/NOAO_Publications_Acknowledgments.html#DESdatause')]
if atellink:
sources.append(add_source(name, refname = 'ATel ' + atellink.split('=')[-1], url = atellink))
sources += [add_source(name, bibcode = '2012ApJ...753..152B'),
add_source(name, bibcode = '2015AJ....150..150F'),
add_source(name, bibcode = '2015AJ....150...82G'),
add_source(name, bibcode = '2015AJ....150..172K')]
sources = ','.join(sources)
add_quantity(name, 'alias', name, sources)
add_quantity(name, 'ra', ra, sources)
add_quantity(name, 'dec', dec, sources)
html2 = load_cached_url("https://portal.nersc.gov/des-sn/transients/" + name, "../sne-external/DES/" + name + ".html")
if not html2:
continue
lines = html2.splitlines()
for line in lines:
if 'var data = ' in line:
jsontxt = json.loads(line.split('=')[-1].rstrip(';'))
for i, band in enumerate(jsontxt['band']):
add_photometry(name, time = jsontxt['mjd'][i], magnitude = jsontxt['mag'][i], e_magnitude = jsontxt['mag_error'][i],
band = band, observatory = 'CTIO', telescope = 'Blanco 4m', instrument = 'DECam',
upperlimit = True if float(jsontxt['snr'][i]) <= 3.0 else '', source = sources)
journal_events()
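	# Import the ASAS-SN supernova list.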
if do_task(task, 'asassn'):
html = load_cached_url("http://www.astronomy.ohio-state.edu/~assassin/sn_list.html", "../sne-external/ASASSN/sn_list.html")
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.find('table').findAll('tr')
for tri, tr in enumerate(tq(trs, currenttask)):
name = ''
source = ''
ra = ''
dec = ''
redshift = ''
hostoff = ''
claimedtype = ''
host = ''
atellink = ''
typelink = ''
if tri == 0:
continue
tds = tr.findAll('td')
for tdi, td in enumerate(tds):
if tdi == 1:
name = add_event(td.text.strip())
atellink = td.find('a')
if atellink:
atellink = atellink['href']
else:
atellink = ''
if tdi == 2:
discdate = td.text.replace('-', '/')
if tdi == 3:
ra = td.text
if tdi == 4:
dec = td.text
if tdi == 5:
redshift = td.text
if tdi == 8:
hostoff = td.text
if tdi == 9:
claimedtype = td.text
typelink = td.find('a')
if typelink:
typelink = typelink['href']
else:
typelink = ''
if tdi == 12:
host = td.text
sources = [add_source(name, url = 'http://www.astronomy.ohio-state.edu/~assassin/sn_list.html', refname = 'ASAS-SN Supernovae')]
typesources = sources[:]
if atellink:
sources.append(add_source(name, refname = 'ATel ' + atellink.split('=')[-1], url = atellink))
if typelink:
typesources.append(add_source(name, refname = 'ATel ' + typelink.split('=')[-1], url = typelink))
sources = ','.join(sources)
typesources = ','.join(typesources)
add_quantity(name, 'alias', name, sources)
add_quantity(name, 'discoverdate', discdate, sources)
add_quantity(name, 'ra', ra, sources, unit = 'floatdegrees')
add_quantity(name, 'dec', dec, sources, unit = 'floatdegrees')
add_quantity(name, 'redshift', redshift, sources)
add_quantity(name, 'hostoffset', hostoff, sources, unit = 'arcseconds')
for ct in claimedtype.split('/'):
if ct != 'Unk':
add_quantity(name, 'claimedtype', ct, typesources)
if host != 'Uncatalogued':
add_quantity(name, 'host', host, sources)
journal_events()
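	# Import metadata from the Asiago classification pages (FITS spectrum retrieval is currently commented out below).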
if do_task(task, 'asiagospectra'):
html = load_cached_url("http://sngroup.oapd.inaf.it./cgi-bin/output_class.cgi?sn=1990", "../sne-external-spectra/Asiago/spectra.html")
if not html:
continue
bs = BeautifulSoup(html, "html5lib")
trs = bs.findAll('tr')
for tr in tq(trs, currenttask):
tds = tr.findAll('td')
name = ''
host = ''
fitsurl = ''
source = ''
reference = ''
for tdi, td in enumerate(tds):
if tdi == 0:
butt = td.find('button')
if not butt:
break
alias = butt.text.strip()
alias = alias.replace('PSNJ', 'PSN J').replace('GAIA', 'Gaia')
elif tdi == 1:
name = td.text.strip().replace('PSNJ', 'PSN J').replace('GAIA', 'Gaia')
if name.startswith('SN '):
name = 'SN' + name[3:]
if not name:
name = alias
if is_number(name[:4]):
name = 'SN' + name
name = add_event(name)
reference = 'Asiago Supernova Catalogue'
refurl = 'http://graspa.oapd.inaf.it/cgi-bin/sncat.php'
secondarysource = add_source(name, refname = reference, url = refurl, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if alias != name:
add_quantity(name, 'alias', alias, secondarysource)
elif tdi == 2:
host = td.text.strip()
if host == 'anonymous':
host = ''
elif tdi == 3:
discoverer = td.text.strip()
elif tdi == 5:
ra = td.text.strip()
elif tdi == 6:
dec = td.text.strip()
elif tdi == 7:
claimedtype = td.text.strip()
elif tdi == 8:
redshift = td.text.strip()
elif tdi == 9:
epochstr = td.text.strip()
if epochstr:
mjd = (astrotime(epochstr[:4] + '-' + epochstr[4:6] + '-' + str(floor(float(epochstr[6:]))).zfill(2)).mjd +
float(epochstr[6:]) - floor(float(epochstr[6:])))
else:
mjd = ''
elif tdi == 10:
refs = td.findAll('a')
source = ''
reference = ''
refurl = ''
for ref in refs:
if ref.text != 'REF':
reference = ref.text
refurl = ref['href']
if reference:
source = add_source(name, refname = reference, url = refurl)
add_quantity(name, 'alias', name, secondarysource)
sources = uniq_cdl(list(filter(None, [source, secondarysource])))
elif tdi == 12:
fitslink = td.find('a')
if fitslink:
fitsurl = fitslink['href']
if name:
add_quantity(name, 'claimedtype', claimedtype, sources)
add_quantity(name, 'ra', ra, sources)
add_quantity(name, 'dec', dec, sources)
add_quantity(name, 'redshift', redshift, sources)
add_quantity(name, 'discoverer', discoverer, sources)
add_quantity(name, 'host', host, sources)
#if fitsurl:
# response = urllib.request.urlopen("http://sngroup.oapd.inaf.it./" + fitsurl)
# compressed = io.BytesIO(response.read())
# decompressed = gzip.GzipFile(fileobj=compressed)
# hdulist = fits.open(decompressed)
# scidata = hdulist[0].data
# print(hdulist[0].header)
# print(scidata[3])
# sys.exit()
journal_events()
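	# Import WISeREP spectra from locally mirrored HTML pages and their associated ASCII files.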
if do_task(task, 'wiserepspectra'):
secondaryreference = 'WISeREP'
secondaryrefurl = 'http://wiserep.weizmann.ac.il/'
secondarybibcode = '2012PASP..124..668Y'
wiserepcnt = 0
# These are known to be in error on the WISeREP page, either fix or ignore them.
wiserepbibcorrectdict = {'2000AJ....120..367G]':'2000AJ....120..367G',
'Harutyunyan+et+al.+2008':'2008A&A...488..383H',
'0609268':'2007AJ....133...58K',
'2006ApJ...636...400Q':'2006ApJ...636..400Q',
'2011ApJ...741...76':'2011ApJ...741...76C',
'2016PASP...128...961':'2016PASP..128...961',
'2002AJ....1124..417H':'2002AJ....1124.417H',
'2013ApJ…774…58D':'2013ApJ...774...58D',
'2011Sci.333..856S':'2011Sci...333..856S',
'2014MNRAS.438,368':'2014MNRAS.438..368T',
'2012MNRAS.420.1135':'2012MNRAS.420.1135S',
'2012Sci..337..942D':'2012Sci...337..942D',
'stt1839':''}
oldname = ''
for folder in tq(sorted(next(os.walk("../sne-external-WISEREP"))[1], key=lambda s: s.lower()), currenttask):
files = glob("../sne-external-WISEREP/" + folder + '/*')
for fname in tq(files, currenttask):
if '.html' in fname:
lfiles = deepcopy(files)
with open(fname, 'r') as f:
path = os.path.abspath(fname)
response = urllib.request.urlopen('file://' + path)
bs = BeautifulSoup(response, "html5lib")
trs = bs.findAll('tr', {'valign': 'top'})
for tri, tr in enumerate(trs):
if "Click to show/update object" in str(tr.contents):
claimedtype = ''
instrument = ''
epoch = ''
observer = ''
reducer = ''
specfile = ''
produceoutput = True
specpath = ''
tds = tr.findAll('td')
for tdi, td in enumerate(tds):
if td.contents:
if tdi == 3:
name = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
elif tdi == 5:
claimedtype = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
if claimedtype == 'SN':
claimedtype = ''
continue
if claimedtype[:3] == 'SN ':
claimedtype = claimedtype[3:].strip()
claimedtype = claimedtype.replace('-like', '').strip()
elif tdi == 9:
instrument = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
elif tdi == 11:
epoch = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
elif tdi == 13:
observer = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
if observer == 'Unknown' or observer == 'Other':
observer = ''
elif tdi == 17:
reducer = re.sub('<[^<]+?>', '', str(td.contents[0])).strip()
if reducer == 'Unknown' or reducer == 'Other':
reducer = ''
elif tdi == 25:
speclinks = td.findAll('a')
try:
for link in speclinks:
if 'Ascii' in link['href']:
specfile = link.contents[0].strip()
tfiles = deepcopy(lfiles)
for fi, fname in enumerate(lfiles):
if specfile in fname:
specpath = fname
del(tfiles[fi])
lfiles = deepcopy(tfiles)
raise(StopIteration)
except StopIteration:
pass
if not specpath:
warnings.warn('Spectrum file not found, "' + specfile + '"')
else:
continue
if "Spec Type:</span>" in str(tr.contents) and produceoutput:
produceoutput = False
trstr = str(tr)
result = re.search('redshift=(.*?)&', trstr)
redshift = ''
if result:
redshift = result.group(1)
if not is_number(redshift) or float(redshift) > 100.:
redshift = ''
result = re.search('publish=(.*?)&', trstr)
bibcode = ''
if result:
bibcode = unescape(urllib.parse.unquote(urllib.parse.unquote(result.group(1))).split('/')[-1])
if not bibcode:
biblink = tr.find('a', {'title': 'Link to NASA ADS'})
if biblink:
bibcode = biblink.contents[0]
if name.startswith('sn'):
name = 'SN' + name[2:]
if name.startswith(('CSS', 'SSS', 'MLS')) and ':' not in name:
name = name.replace('-', ':', 1)
if name.startswith('MASTERJ'):
name = name.replace('MASTERJ', 'MASTER OT J')
if name.startswith('PSNJ'):
name = name.replace('PSNJ', 'PSN J')
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
#print(name + " " + claimedtype + " " + epoch + " " + observer + " " + reducer + " " + specfile + " " + bibcode + " " + redshift)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondarybibcode, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
if bibcode:
newbibcode = bibcode
if bibcode in wiserepbibcorrectdict:
newbibcode = wiserepbibcorrectdict[bibcode]
if newbibcode:
source = add_source(name, bibcode = unescape(newbibcode))
else:
source = add_source(name, refname = unescape(bibcode))
sources = uniq_cdl([source, secondarysource])
else:
sources = secondarysource
if claimedtype not in ['Other']:
add_quantity(name, 'claimedtype', claimedtype, secondarysource)
add_quantity(name, 'redshift', redshift, secondarysource)
if not specpath:
continue
with open(specpath,'r') as f:
data = [x.split() for x in f]
skipspec = False
newdata = []
oldval = ''
for row in data:
if row and '#' not in row[0]:
if len(row) >= 2 and is_number(row[0]) and is_number(row[1]) and row[1] != oldval:
newdata.append(row)
oldval = row[1]
if skipspec or not newdata:
warnings.warn('Skipped adding spectrum file ' + specfile)
continue
data = [list(i) for i in zip(*newdata)]
wavelengths = data[0]
fluxes = data[1]
errors = ''
if len(data) == 3:
									errors = data[2]
time = str(astrotime(epoch).mjd)
if max([float(x) for x in fluxes]) < 1.0e-5:
fluxunit = 'erg/s/cm^2/Angstrom'
else:
fluxunit = 'Uncalibrated'
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = fluxunit, errors = errors, errorunit = fluxunit, wavelengths = wavelengths,
fluxes = fluxes, u_time = 'MJD', time = time, instrument = instrument, source = sources, observer = observer, reducer = reducer,
filename = specfile)
wiserepcnt = wiserepcnt + 1
if args.travis and wiserepcnt % travislimit == 0:
break
tprint('Unadded files: ' + str(len(lfiles) - 1) + "/" + str(len(files)-1))
tprint('WISeREP spectrum count: ' + str(wiserepcnt))
journal_events()
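	# Import CfA Supernova Archive spectra (SN Ia, SN Ibc, and other).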
if do_task(task, 'cfaspectra'):
# Ia spectra
oldname = ''
for name in tq(sorted(next(os.walk("../sne-external-spectra/CfA_SNIa"))[1], key=lambda s: s.lower()), currenttask):
fullpath = "../sne-external-spectra/CfA_SNIa/" + name
origname = name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
if name.startswith('snf') and is_number(name[3:7]):
name = 'SNF' + name[3:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = add_source(name, refname = reference, url = refurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, source)
for fi, fname in enumerate(sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
filename = os.path.basename(fname)
fileparts = filename.split('-')
if origname.startswith("sn") and is_number(origname[2:6]):
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:]
instrument = fileparts[2].split('.')[0]
else:
year = fileparts[2][:4]
month = fileparts[2][4:6]
day = fileparts[2][6:]
instrument = fileparts[3].split('.')[0]
time = str(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = data[1]
errors = data[2]
sources = uniq_cdl([source, add_source(name, bibcode = '2012AJ....143..126B'), add_source(name, bibcode = '2008AJ....135.1598M')])
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', filename = filename,
wavelengths = wavelengths, fluxes = fluxes, u_time = 'MJD' if time else '', time = time, instrument = instrument,
errorunit = "ergs/s/cm^2/Angstrom", errors = errors, source = sources, dereddened = False, deredshifted = False)
if args.travis and fi >= travislimit:
break
journal_events()
# Ibc spectra
oldname = ''
for name in tq(sorted(next(os.walk("../sne-external-spectra/CfA_SNIbc"))[1], key=lambda s: s.lower()), currenttask):
fullpath = "../sne-external-spectra/CfA_SNIbc/" + name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = add_source(name, refname = reference, url = refurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, source)
for fi, fname in enumerate(sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
filename = os.path.basename(fname)
fileparts = filename.split('-')
instrument = ''
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:].split('.')[0]
if len(fileparts) > 2:
instrument = fileparts[-1].split('.')[0]
time = str(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = data[1]
sources = uniq_cdl([source, add_source(name, bibcode = '2014AJ....147...99M')])
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths, filename = filename,
fluxes = fluxes, u_time = 'MJD' if time else '', time = time, instrument = instrument, source = sources,
dereddened = False, deredshifted = False)
if args.travis and fi >= travislimit:
break
journal_events()
# Other spectra
oldname = ''
for name in tq(sorted(next(os.walk("../sne-external-spectra/CfA_Extra"))[1], key=lambda s: s.lower()), currenttask):
fullpath = "../sne-external-spectra/CfA_Extra/" + name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = add_source(name, refname = reference, url = refurl, secondary = True, acknowledgment = cfaack)
add_quantity(name, 'alias', name, source)
for fi, fname in enumerate(sorted(glob(fullpath + '/*'), key=lambda s: s.lower())):
if not os.path.isfile(fname):
continue
filename = os.path.basename(fname)
if (not filename.startswith('sn') or not filename.endswith('flm') or
any(x in filename for x in ['-interp', '-z', '-dered', '-obj', '-gal'])):
continue
fileparts = filename.split('.')[0].split('-')
instrument = ''
time = ''
if len(fileparts) > 1:
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:]
if is_number(year) and is_number(month) and is_number(day):
if len(fileparts) > 2:
instrument = fileparts[-1]
time = str(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = [str(Decimal(x)*Decimal(1.0e-15)) for x in data[1]]
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths, filename = filename,
fluxes = fluxes, u_time = 'MJD' if time else '', time = time, instrument = instrument, source = source,
dereddened = False, deredshifted = False)
if args.travis and fi >= travislimit:
break
journal_events()
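	# Import SNLS spectra (2009A&A...507...85B); observation dates come from VizieR table J/A+A/507/85/table1.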
if do_task(task, 'snlsspectra'):
result = Vizier.get_catalogs("J/A+A/507/85/table1")
table = result[list(result.keys())[0]]
table.convert_bytestring_to_unicode(python3_only=True)
datedict = {}
for row in table:
datedict['SNLS-' + row['SN']] = str(astrotime(row['Date']).mjd)
oldname = ''
for fi, fname in enumerate(tq(sorted(glob('../sne-external-spectra/SNLS/*'), key=lambda s: s.lower()), currenttask = currenttask)):
filename = os.path.basename(fname)
fileparts = filename.split('_')
name = 'SNLS-' + fileparts[1]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
source = add_source(name, bibcode = "2009A&A...507...85B")
add_quantity(name, 'alias', name, source)
add_quantity(name, 'discoverdate', '20' + fileparts[1][:2], source)
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
specdata = []
for r, row in enumerate(data):
if row[0] == '@TELESCOPE':
telescope = row[1].strip()
elif row[0] == '@REDSHIFT':
add_quantity(name, 'redshift', row[1].strip(), source)
if r < 14:
continue
specdata.append(list(filter(None, [x.strip(' \t') for x in row])))
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[1]
fluxes = [pretty_num(float(x)*1.e-16, sig = get_sig_digits(x)) for x in specdata[2]]
errors = [pretty_num(float(x)*1.e-16, sig = get_sig_digits(x)) for x in specdata[3]]
add_spectrum(name = name, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths,
fluxes = fluxes, u_time = 'MJD' if name in datedict else '', time = datedict[name] if name in datedict else '', telescope = telescope, source = source,
filename = filename)
if args.travis and fi >= travislimit:
break
journal_events()
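	# Import CSP spectra (2013ApJ...773...53F).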
if do_task(task, 'cspspectra'):
oldname = ''
for fi, fname in enumerate(tq(sorted(glob('../sne-external-spectra/CSP/*'), key=lambda s: s.lower()), currenttask = currenttask)):
filename = os.path.basename(fname)
sfile = filename.split('.')
if sfile[1] == 'txt':
continue
sfile = sfile[0]
fileparts = sfile.split('_')
name = 'SN20' + fileparts[0][2:]
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
telescope = fileparts[-2]
instrument = fileparts[-1]
source = add_source(name, bibcode = "2013ApJ...773...53F")
add_quantity(name, 'alias', name, source)
f = open(fname,'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
specdata = []
for r, row in enumerate(data):
if row[0] == '#JDate_of_observation:':
jd = row[1].strip()
time = str(jd_to_mjd(Decimal(jd)))
elif row[0] == '#Redshift:':
add_quantity(name, 'redshift', row[1].strip(), source)
if r < 7:
continue
specdata.append(list(filter(None, [x.strip(' ') for x in row])))
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
add_spectrum(name = name, u_time = 'MJD', time = time, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom', wavelengths = wavelengths,
fluxes = fluxes, telescope = telescope, instrument = instrument, source = source, deredshifted = True, filename = filename)
if args.travis and fi >= travislimit:
break
journal_events()
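	# Import spectra from the UCB Filippenko Group's Supernova Database (SNDB).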
if do_task(task, 'ucbspectra'):
secondaryreference = "UCB Filippenko Group's Supernova Database (SNDB)"
secondaryrefurl = "http://heracles.astro.berkeley.edu/sndb/info"
secondaryrefbib = "2012MNRAS.425.1789S"
ucbspectracnt = 0
jsontxt = load_cached_url("http://heracles.astro.berkeley.edu/sndb/download?id=allpubspec",
'../sne-external-spectra/UCB/allpub.json')
if not jsontxt:
continue
spectra = json.loads(jsontxt)
spectra = sorted(spectra, key = lambda k: k['ObjName'])
oldname = ''
for spectrum in tq(spectra, currenttask = currenttask):
name = spectrum["ObjName"]
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondaryrefbib, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
sources = [secondarysource]
if spectrum["Reference"]:
sources += [add_source(name, bibcode = spectrum["Reference"])]
sources = uniq_cdl(sources)
if spectrum["Type"] and spectrum["Type"].strip() != "NoMatch":
for ct in spectrum["Type"].strip().split(','):
add_quantity(name, 'claimedtype', ct.replace('-norm', '').strip(), sources)
if spectrum["DiscDate"]:
add_quantity(name, 'discoverdate', spectrum["DiscDate"].replace('-', '/'), sources)
if spectrum["HostName"]:
add_quantity(name, 'host', urllib.parse.unquote(spectrum["HostName"]).replace('*', ''), sources)
if spectrum["UT_Date"]:
epoch = str(spectrum["UT_Date"])
year = epoch[:4]
month = epoch[4:6]
day = epoch[6:]
sig = get_sig_digits(day) + 5
mjd = pretty_num(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)), sig = sig)
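# Illustrative example (added for clarity; the date is hypothetical): a UT_Date of 20110824.5
# splits into year '2011', month '08', day '24.5'; the integer day is converted to MJD
# (2011-08-24 -> 55797) and the fractional 0.5 day is added back, giving mjd ~ 55797.5.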
filename = spectrum["Filename"] if spectrum["Filename"] else ''
instrument = spectrum["Instrument"] if spectrum["Instrument"] else ''
reducer = spectrum["Reducer"] if spectrum["Reducer"] else ''
observer = spectrum["Observer"] if spectrum["Observer"] else ''
snr = str(spectrum["SNR"]) if spectrum["SNR"] else ''
if not filename:
raise(ValueError('Filename not found for SNDB spectrum!'))
if not spectrum["SpecID"]:
raise(ValueError('ID not found for SNDB spectrum!'))
filepath = '../sne-external-spectra/UCB/' + filename
if archived_task('ucbspectra') and os.path.isfile(filepath):
with open(filepath, 'r') as f:
spectxt = f.read()
else:
session = requests.Session()
response = session.get("http://heracles.astro.berkeley.edu/sndb/download?id=ds:" + str(spectrum["SpecID"]))
spectxt = response.text
with open(filepath, 'w') as f:
f.write(spectxt)
specdata = list(csv.reader(spectxt.splitlines(), delimiter=' ', skipinitialspace=True))
startrow = 0
for row in specdata:
if row[0][0] == '#':
startrow += 1
else:
break
specdata = specdata[startrow:]
haserrors = len(specdata[0]) == 3 and specdata[0][2] and specdata[0][2] != 'NaN'
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
errors = ''
if haserrors:
errors = specdata[2]
if not list(filter(None, errors)):
errors = ''
add_spectrum(name = name, u_time = 'MJD', time = mjd, waveunit = 'Angstrom', fluxunit = 'Uncalibrated',
wavelengths = wavelengths, filename = filename, fluxes = fluxes, errors = errors, errorunit = 'Uncalibrated',
instrument = instrument, source = sources, snr = snr, observer = observer, reducer = reducer,
deredshifted = ('-noz' in filename))
ucbspectracnt = ucbspectracnt + 1
if args.travis and ucbspectracnt >= travislimit:
break
journal_events()
if do_task(task, 'suspectspectra'):
with open('../sne-external-spectra/Suspect/sources.json', 'r') as f:
sourcedict = json.loads(f.read())
with open('../sne-external-spectra/Suspect/filename-changes.txt', 'r') as f:
rows = f.readlines()
changedict = {}
for row in rows:
if not row.strip() or row[0] == "#":
continue
items = row.strip().split(' ')
changedict[items[1]] = items[0]
suspectcnt = 0
folders = next(os.walk('../sne-external-spectra/Suspect'))[1]
for folder in tq(folders, currenttask):
eventfolders = next(os.walk('../sne-external-spectra/Suspect/'+folder))[1]
oldname = ''
for eventfolder in tq(eventfolders, currenttask):
name = eventfolder
if is_number(name[:4]):
name = 'SN' + name
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
secondaryreference = "SUSPECT"
secondaryrefurl = "https://www.nhn.ou.edu/~suspect/"
secondarybibcode = "2001AAS...199.8408R"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondarybibcode, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
eventspectra = next(os.walk('../sne-external-spectra/Suspect/'+folder+'/'+eventfolder))[2]
for spectrum in eventspectra:
sources = [secondarysource]
bibcode = ''
if spectrum in changedict:
specalias = changedict[spectrum]
else:
specalias = spectrum
if specalias in sourcedict:
bibcode = sourcedict[specalias]
elif name in sourcedict:
bibcode = sourcedict[name]
if bibcode:
source = add_source(name, bibcode = unescape(bibcode))
sources += [source]
sources = uniq_cdl(sources)
date = spectrum.split('_')[1]
year = date[:4]
month = date[4:6]
day = date[6:]
sig = get_sig_digits(day) + 5
time = pretty_num(astrotime(year + '-' + month + '-' + str(floor(float(day))).zfill(2)).mjd + float(day) - floor(float(day)), sig = sig)
with open('../sne-external-spectra/Suspect/'+folder+'/'+eventfolder+'/'+spectrum) as f:
specdata = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
specdata = list(filter(None, specdata))
newspec = []
oldval = ''
for row in specdata:
if row[1] == oldval:
continue
newspec.append(row)
oldval = row[1]
specdata = newspec
haserrors = len(specdata[0]) == 3 and specdata[0][2] and specdata[0][2] != 'NaN'
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
errors = ''
if haserrors:
errors = specdata[2]
add_spectrum(name = name, u_time = 'MJD', time = time, waveunit = 'Angstrom', fluxunit = 'Uncalibrated', wavelengths = wavelengths,
fluxes = fluxes, errors = errors, errorunit = 'Uncalibrated', source = sources, filename = spectrum)
suspectcnt = suspectcnt + 1
if args.travis and suspectcnt % travislimit == 0:
break
journal_events()
if do_task(task, 'snfspectra'):
eventfolders = next(os.walk('../sne-external-spectra/SNFactory'))[1]
bibcodes = {'SN2005gj':'2006ApJ...650..510A', 'SN2006D':'2007ApJ...654L..53T', 'SN2007if':'2010ApJ...713.1073S', 'SN2011fe':'2013A&A...554A..27P'}
oldname = ''
snfcnt = 0
for eventfolder in eventfolders:
name = eventfolder
name = get_preferred_name(name)
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
secondaryreference = "Nearby Supernova Factory"
secondaryrefurl = "http://snfactory.lbl.gov/"
secondarybibcode = "2002SPIE.4836...61A"
secondarysource = add_source(name, refname = secondaryreference, url = secondaryrefurl, bibcode = secondarybibcode, secondary = True)
add_quantity(name, 'alias', name, secondarysource)
bibcode = bibcodes[name]
source = add_source(name, bibcode = bibcode)
sources = uniq_cdl([source,secondarysource])
eventspectra = glob('../sne-external-spectra/SNFactory/'+eventfolder+'/*.dat')
for spectrum in eventspectra:
filename = os.path.basename(spectrum)
with open(spectrum) as f:
specdata = list(csv.reader(f, delimiter=' ', skipinitialspace=True))
specdata = list(filter(None, specdata))
newspec = []
time = ''
telescope = ''
instrument = ''
observer = ''
observatory = ''
if 'Keck_20060202_R' in spectrum:
time = '53768.23469'
elif 'Spectrum05_276' in spectrum:
time = pretty_num(astrotime('2005-10-03').mjd, sig = 5)
elif 'Spectrum05_329' in spectrum:
time = pretty_num(astrotime('2005-11-25').mjd, sig = 5)
elif 'Spectrum05_336' in spectrum:
time = pretty_num(astrotime('2005-12-02').mjd, sig = 5)
for row in specdata:
if row[0][0] == '#':
joinrow = (' '.join(row)).split('=')
if len(joinrow) < 2:
continue
field = joinrow[0].strip('# ')
value = joinrow[1].split('/')[0].strip("' ")
if not time:
if field == 'JD':
time = str(jd_to_mjd(Decimal(value)))
elif field == 'MJD':
time = value
elif field == 'MJD-OBS':
time = value
if field == 'OBSERVER':
observer = value.capitalize()
if field == 'OBSERVAT':
observatory = value.capitalize()
if field == 'TELESCOP':
telescope = value.capitalize()
if field == 'INSTRUME':
instrument = value.capitalize()
else:
newspec.append(row)
if not time:
raise(ValueError('Time missing from spectrum.'))
specdata = newspec
haserrors = len(specdata[0]) == 3 and specdata[0][2] and specdata[0][2] != 'NaN'
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
errors = ''
if haserrors:
errors = specdata[2]
add_spectrum(name = name, u_time = 'MJD', time = time, waveunit = 'Angstrom', fluxunit = 'erg/s/cm^2/Angstrom',
wavelengths = wavelengths, fluxes = fluxes, errors = errors, observer = observer, observatory = observatory,
telescope = telescope, instrument = instrument,
errorunit = ('Variance' if name == 'SN2011fe' else 'erg/s/cm^2/Angstrom'), source = sources, filename = filename)
snfcnt = snfcnt + 1
if args.travis and snfcnt % travislimit == 0:
break
journal_events()
if do_task(task, 'superfitspectra'):
sfdirs = glob('../sne-external-spectra/superfit/*')
for sfdir in tq(sfdirs, currenttask = currenttask):
sffiles = sorted(glob(sfdir + "/*.dat"))
lastname = ''
oldname = ''
for sffile in tq(sffiles, currenttask = currenttask):
basename = os.path.basename(sffile)
name = basename.split('.')[0]
if name.startswith('sn'):
name = 'SN' + name[2:]
if len(name) == 7:
name = name[:6] + name[6].upper()
elif name.startswith('ptf'):
name = 'PTF' + name[3:]
if 'theory' in name:
continue
if event_exists(name):
prefname = get_preferred_name(name)
if 'spectra' in events[prefname] and lastname != prefname:
continue
if oldname and name != oldname:
journal_events()
oldname = name
name = add_event(name)
epoch = basename.split('.')[1]
(mldt, mlmag, mlband, mlsource) = get_max_light(name)
if mldt:
epoff = Decimal(0.0) if epoch == 'max' else (Decimal(epoch[1:]) if epoch[0] == 'p' else -Decimal(epoch[1:]))
else:
epoff = ''
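# Illustrative example (added for clarity; the epoch strings are hypothetical): an epoch of
# 'max' gives epoff = 0, 'p5' gives epoff = +5 days after maximum light, and 'm3' gives
# epoff = -3 days.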
source = add_source(name, refname = 'Superfit', url = 'http://www.dahowell.com/superfit.html', secondary = True)
add_quantity(name, 'alias', name, source)
with open(sffile) as f:
rows = f.read().splitlines()
specdata = []
for row in rows:
if row.strip():
specdata.append(list(filter(None,re.split('\t+|\s+', row, maxsplit=0))))
specdata = [[x.replace('D','E') for x in list(i)] for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
mlmjd = str(Decimal(astrotime('-'.join([str(mldt.year), str(mldt.month), str(mldt.day)])).mjd) + epoff) if (epoff != '') else ''
add_spectrum(name, u_time = 'MJD' if mlmjd else '', time = mlmjd, waveunit = 'Angstrom', fluxunit = 'Uncalibrated',
wavelengths = wavelengths, fluxes = fluxes, source = source)
lastname = name
journal_events()
if do_task(task, 'mergeduplicates'):
if args.update and not len(events):
tprint('No sources changed, event files unchanged in update.')
sys.exit(1)
merge_duplicates()
if do_task(task, 'setprefnames'):
set_preferred_names()
files = repo_file_list()
path = '../bibauthors.json'
if os.path.isfile(path):
with open(path, 'r') as f:
bibauthordict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
bibauthordict = OrderedDict()
path = '../extinctions.json'
if os.path.isfile(path):
with open(path, 'r') as f:
extinctionsdict = json.loads(f.read(), object_pairs_hook=OrderedDict)
else:
extinctionsdict = OrderedDict()
for fi in tq(files, 'Sanitizing and deriving quantities for events'):
events = OrderedDict()
name = os.path.basename(os.path.splitext(fi)[0]).replace('.json', '')
name = add_event(name, loadifempty = False)
derive_and_sanitize()
if has_task('writeevents'):
write_all_events(empty = True, gz = True, bury = True)
jsonstring = json.dumps(bibauthordict, indent='\t', separators=(',', ':'), ensure_ascii=False)
with codecs.open('../bibauthors.json', 'w', encoding='utf8') as f:
f.write(jsonstring)
jsonstring = json.dumps(extinctionsdict, indent='\t', separators=(',', ':'), ensure_ascii=False)
with codecs.open('../extinctions.json', 'w', encoding='utf8') as f:
f.write(jsonstring)
print("Memory used (MBs on Mac, GBs on Linux): " + "{:,}".format(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024./1024.))
sys.exit(0)
|
mit
| 6,726,496,294,048,525,000
| 49.503873
| 561
| 0.497728
| false
| 3.827239
| false
| false
| false
|
dparks1134/STAMP
|
stamp/plugins/samples/plots/SeqHistogram.py
|
1
|
9822
|
#=======================================================================
# Author: Donovan Parks
#
# Sequence histogram plot.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
import sys
import math
from PyQt4 import QtCore, QtGui
from stamp.plugins.samples.AbstractSamplePlotPlugin import AbstractSamplePlotPlugin, TestWindow, ConfigureDialog
from stamp.plugins.samples.plots.configGUI.seqHistogramUI import Ui_SeqHistogramDialog
class SeqHistogram(AbstractSamplePlotPlugin):
'''
Sequence histogram plot.
'''
def __init__(self, preferences, parent=None):
AbstractSamplePlotPlugin.__init__(self, preferences, parent)
self.preferences = preferences
self.name = 'Sequence histogram'
self.type = 'Exploratory'
self.settings = preferences['Settings']
self.figWidth = self.settings.value(self.name + '/width', 7.0).toDouble()[0]
self.figHeight = self.settings.value(self.name + '/height', 7.0).toDouble()[0]
self.bCustomBinWidth = self.settings.value(self.name + '/custom bin width', False).toBool()
self.binWidth = self.settings.value(self.name + '/bin width', 100.0).toDouble()[0]
self.yAxisLogScale = self.settings.value(self.name + '/log scale', False).toBool()
self.bCustomXaxis = self.settings.value(self.name + '/custom x-axis extents', False).toBool()
self.xLimitLeft = self.settings.value(self.name + '/min value', 0.0).toDouble()[0]
self.xLimitRight = self.settings.value(self.name + '/max value', 1.0).toDouble()[0]
self.legendPos = self.settings.value(self.name + '/legend position', 0).toInt()[0]
def mirrorProperties(self, plotToCopy):
self.name = plotToCopy.name
self.figWidth = plotToCopy.figWidth
self.figHeight = plotToCopy.figHeight
self.bCustomBinWidth = plotToCopy.bCustomBinWidth
self.binWidth = plotToCopy.binWidth
self.yAxisLogScale = plotToCopy.yAxisLogScale
self.bCustomXaxis = plotToCopy.bCustomXaxis
self.xLimitLeft = plotToCopy.xLimitLeft
self.xLimitRight = plotToCopy.xLimitRight
self.legendPos = plotToCopy.legendPos
def plot(self, profile, statsResults):
if len(profile.profileDict) <= 0:
self.emptyAxis()
return
# *** Colour of plot elements
axesColour = str(self.preferences['Axes colour'].name())
profile1Colour = str(self.preferences['Sample 1 colour'].name())
profile2Colour = str(self.preferences['Sample 2 colour'].name())
# *** Get sequence counts
seqs1 = profile.getSequenceCounts(0)
seqs2 = profile.getSequenceCounts(1)
# *** Set x-axis limit
self.xMin = min(min(seqs1),min(seqs2))
if self.xLimitLeft == None:
self.xLimitLeft = self.xMin
self.xMax = max(max(seqs1),max(seqs2))
if self.xLimitRight == None:
self.xLimitRight = self.xMax
# Set bin width
if not self.bCustomBinWidth:
self.binWidth = (self.xMax - self.xMin) / 40
# *** Set size of figure
self.fig.clear()
self.fig.set_size_inches(self.figWidth, self.figHeight)
heightBottomLabels = 0.4 # inches
widthSideLabel = 0.5 # inches
padding = 0.2 # inches
axesHist = self.fig.add_axes([widthSideLabel/self.figWidth,heightBottomLabels/self.figHeight,\
1.0-(widthSideLabel+padding)/self.figWidth,\
1.0-(heightBottomLabels+padding)/self.figHeight])
# *** Histogram plot
bins = [0]
binEnd = self.binWidth
while binEnd <= self.xMax:
bins.append(binEnd)
binEnd += self.binWidth
bins.append(binEnd)
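# Worked example (added for clarity; the values are hypothetical): with self.binWidth = 100 and
# self.xMax = 250, the loop yields bins = [0, 100, 200] and the final append adds 300, so the
# last bin always covers xMax.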
n, b, patches = axesHist.hist([seqs1, seqs2], bins=bins, log=self.yAxisLogScale)
for patch in patches[0]:
patch.set_facecolor(profile1Colour)
for patch in patches[1]:
patch.set_facecolor(profile2Colour)
if self.bCustomXaxis:
axesHist.set_xlim(self.xLimitLeft, self.xLimitRight)
axesHist.set_xlabel('Sequences')
axesHist.set_ylabel('Number of features')
# *** Prettify plot
if self.legendPos != -1:
legend = axesHist.legend([patches[0][0], patches[1][0]], (profile.sampleNames[0], profile.sampleNames[1]), loc=self.legendPos)
legend.get_frame().set_linewidth(0)
for a in axesHist.yaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for a in axesHist.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for line in axesHist.yaxis.get_ticklines():
line.set_color(axesColour)
for line in axesHist.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axesHist.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
self.updateGeometry()
self.draw()
def configure(self, profile, statsResults):
self.profile = profile
self.configDlg = ConfigureDialog(Ui_SeqHistogramDialog)
self.connect(self.configDlg.ui.chkCustomBinWidth, QtCore.SIGNAL('toggled(bool)'), self.changeCustomBinWidth)
self.connect(self.configDlg.ui.chkCustomXaxis, QtCore.SIGNAL('toggled(bool)'), self.changeCustomXaxis)
self.connect(self.configDlg.ui.btnXmin, QtCore.SIGNAL('clicked()'), self.setXaxisMin)
self.connect(self.configDlg.ui.btnXmax, QtCore.SIGNAL('clicked()'), self.setXaxisMax)
self.configDlg.ui.spinFigWidth.setValue(self.figWidth)
self.configDlg.ui.spinFigHeight.setValue(self.figHeight)
self.configDlg.ui.chkCustomBinWidth.setChecked(self.bCustomBinWidth)
self.configDlg.ui.spinBinWidth.setValue(self.binWidth)
self.configDlg.ui.chkLogScale.setChecked(self.yAxisLogScale)
self.configDlg.ui.chkCustomXaxis.setChecked(self.bCustomXaxis)
self.configDlg.ui.spinXmin.setValue(self.xLimitLeft)
self.configDlg.ui.spinXmax.setValue(self.xLimitRight)
self.changeCustomBinWidth()
self.changeCustomXaxis()
# legend position
if self.legendPos == 0:
self.configDlg.ui.radioLegendPosBest.setChecked(True)
elif self.legendPos == 1:
self.configDlg.ui.radioLegendPosUpperRight.setChecked(True)
elif self.legendPos == 7:
self.configDlg.ui.radioLegendPosCentreRight.setChecked(True)
elif self.legendPos == 4:
self.configDlg.ui.radioLegendPosLowerRight.setChecked(True)
elif self.legendPos == 2:
self.configDlg.ui.radioLegendPosUpperLeft.setChecked(True)
elif self.legendPos == 6:
self.configDlg.ui.radioLegendPosCentreLeft.setChecked(True)
elif self.legendPos == 3:
self.configDlg.ui.radioLegendPosLowerLeft.setChecked(True)
else:
self.configDlg.ui.radioLegendPosNone.setChecked(True)
if self.configDlg.exec_() == QtGui.QDialog.Accepted:
self.figWidth = self.configDlg.ui.spinFigWidth.value()
self.figHeight = self.configDlg.ui.spinFigHeight.value()
self.bCustomBinWidth = self.configDlg.ui.chkCustomBinWidth.isChecked()
self.binWidth = self.configDlg.ui.spinBinWidth.value()
self.yAxisLogScale = self.configDlg.ui.chkLogScale.isChecked()
self.bCustomXaxis = self.configDlg.ui.chkCustomXaxis.isChecked()
self.xLimitLeft = self.configDlg.ui.spinXmin.value()
self.xLimitRight = self.configDlg.ui.spinXmax.value()
# legend position
if self.configDlg.ui.radioLegendPosBest.isChecked() == True:
self.legendPos = 0
elif self.configDlg.ui.radioLegendPosUpperRight.isChecked() == True:
self.legendPos = 1
elif self.configDlg.ui.radioLegendPosCentreRight.isChecked() == True:
self.legendPos = 7
elif self.configDlg.ui.radioLegendPosLowerRight.isChecked() == True:
self.legendPos = 4
elif self.configDlg.ui.radioLegendPosUpperLeft.isChecked() == True:
self.legendPos = 2
elif self.configDlg.ui.radioLegendPosCentreLeft.isChecked() == True:
self.legendPos = 6
elif self.configDlg.ui.radioLegendPosLowerLeft.isChecked() == True:
self.legendPos = 3
else:
self.legendPos = -1
self.settings.setValue(self.name + '/width', self.figWidth)
self.settings.setValue(self.name + '/height', self.figHeight)
self.settings.setValue(self.name + '/custom bin width', self.bCustomBinWidth)
self.settings.setValue(self.name + '/bin width', self.binWidth)
self.settings.setValue(self.name + '/log scale', self.yAxisLogScale)
self.settings.setValue(self.name + '/custom x-axis extents', self.bCustomXaxis)
self.settings.setValue(self.name + '/min value', self.xLimitLeft)
self.settings.setValue(self.name + '/max value', self.xLimitRight)
self.settings.setValue(self.name + '/legend position', self.legendPos)
self.plot(profile, statsResults)
def changeCustomBinWidth(self):
self.configDlg.ui.spinBinWidth.setEnabled(self.configDlg.ui.chkCustomBinWidth.isChecked())
def changeCustomXaxis(self):
self.configDlg.ui.spinXmin.setEnabled(self.configDlg.ui.chkCustomXaxis.isChecked())
self.configDlg.ui.spinXmax.setEnabled(self.configDlg.ui.chkCustomXaxis.isChecked())
def setXaxisMin(self):
seqs1 = self.profile.getSequenceCounts(0)
seqs2 = self.profile.getSequenceCounts(1)
self.configDlg.ui.spinXmin.setValue(min(min(seqs1), min(seqs2)))
def setXaxisMax(self):
seqs1 = self.profile.getSequenceCounts(0)
seqs2 = self.profile.getSequenceCounts(1)
self.configDlg.ui.spinXmax.setValue(max(max(seqs1), max(seqs2)))
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
testWindow = TestWindow(SeqHistogram)
testWindow.show()
sys.exit(app.exec_())
|
gpl-3.0
| -6,319,789,311,616,871,000
| 36.492366
| 129
| 0.723376
| false
| 3.12504
| true
| false
| false
|
jtladner/Scripts
|
BEAST_prep/beast_nexus_prep_v2.0.py
|
1
|
4797
|
#!/usr/bin/env python
from __future__ import division
import optparse, os
#This script uses an aligned fasta file and a tab-delimited file containing CDS coordinates to create a nexus input for BEAST
#In version 2.0, added a flag to throw if you only want coding sequence to be included in the nexus file
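# Illustrative sketch (added for clarity, not part of the original script): the -c coordinates
# file is expected to hold one CDS per line with three tab-delimited columns, e.g. (hypothetical
# names and positions):
# CDS_1<TAB>266<TAB>13468
# CDS_2<TAB>13468<TAB>21555
# Only the second and third columns (1-based start and stop positions) are read by get_coords().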
def main():
usage = '%prog [options]'
p = optparse.OptionParser()
p.add_option('-f', '--fasta', help='Aligned fasta. [None]')
p.add_option('-c', '--coords', help='Tab-delimited file with coordinates of CDS. Should have at least 3 tab-delimited columns. The first is not used directly and will typically hold a CDS name. The next two have start and stop base positions. [None]')
p.add_option('-o', '--out', help='Name for output nexus file. [None]')
p.add_option('--onlyCDS', default=False, action="store_true", help='Use this flag if you only want coding regions to be included in the output nexus file. [None]')
opts, args = p.parse_args()
make_beast_nexus(opts)
#----------------------End of main()
def make_beast_nexus(opts):
fout=open(opts.out, 'w')
#Read in seqs
names, seqs = read_fasta_lists(opts.fasta)
#Get coding coordinates
coding_coords=get_coords(opts.coords)
#Make concatenated coding seqs
coding_seqs=['']*len(seqs)
for start, end in coding_coords:
for i in range(len(seqs)):
coding_seqs[i]+=seqs[i][start-1:end]
if opts.onlyCDS:
fout.write("#NEXUS\n[File created using beast_nexus_prep.py using %s and %s]\n\nBEGIN TAXA;\n" % (opts.fasta, opts.coords))
fout.write("DIMENSIONS NTAX=%d;\n\nTAXLABELS\n%s\n;\n\nEND;\n" % (len(names), '\n'.join(names)))
fout.write("BEGIN CHARACTERS;\nDIMENSIONS NCHAR=%d;\nFORMAT DATATYPE=DNA MISSING=N GAP=-;\nMATRIX\n\n%s\n;\n\nEND;\n\n" % (len(coding_seqs[0]), '\n'.join(['%s %s' % (names[x], coding_seqs[x]) for x in range(len(names))])))
fout.write("BEGIN ASSUMPTIONS;\n\tcharset coding = 1-%d;\nend;\n" % (len(coding_seqs[0])))
else:
#Get non-coding coordinates
noncoding_coords=extrap_noncoding(coding_coords, len(seqs[0]))
#Make concatenated noncoding seqs
noncoding_seqs=['']*len(seqs)
for start, end in noncoding_coords:
for i in range(len(seqs)):
noncoding_seqs[i]+=seqs[i][start-1:end]
concat_seqs=[coding_seqs[i]+noncoding_seqs[i] for i in range(len(seqs))]
coding_start=1
coding_end=len(coding_seqs[0])
noncoding_start=coding_end+1
noncoding_end=len(concat_seqs[0])
fout.write("#NEXUS\n[File created using beast_nexus_prep.py using %s and %s]\n\nBEGIN TAXA;\n" % (opts.fasta, opts.coords))
fout.write("DIMENSIONS NTAX=%d;\n\nTAXLABELS\n%s\n;\n\nEND;\n" % (len(names), '\n'.join(names)))
fout.write("BEGIN CHARACTERS;\nDIMENSIONS NCHAR=%d;\nFORMAT DATATYPE=DNA MISSING=N GAP=-;\nMATRIX\n\n%s\n;\n\nEND;\n\n" % (len(concat_seqs[0]), '\n'.join(['%s %s' % (names[x], concat_seqs[x]) for x in range(len(names))])))
fout.write("BEGIN ASSUMPTIONS;\n\tcharset coding = %d-%d;\n\tcharset noncoding = %d-%d;\nend;\n" % (coding_start, coding_end, noncoding_start, noncoding_end ))
fout.close()
def extrap_noncoding(coding_coords, seq_len):
non_coords=[]
#To handle noncoding at the very beginning of the sequence
if coding_coords[0][0] != 1:
non_coords.append((1,coding_coords[0][0]-1))
#To handle noncoding regions in between coding seqs
coding_sorted=sorted(coding_coords[:])
for i in range(len(coding_sorted[:-1])):
if coding_sorted[i+1][0]-coding_sorted[i][1]>0:
non_coords.append((coding_sorted[i][1]+1,coding_sorted[i+1][0]-1))
#To handle non-coding at the very end of the sequence
if coding_coords[-1][1] != seq_len:
non_coords.append((coding_coords[-1][1]+1, seq_len))
print non_coords
return non_coords
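# Worked example (added for clarity; the coordinates are hypothetical): for
# coding_coords = [(10, 50), (80, 120)] and seq_len = 150, this function returns
# [(1, 9), (51, 79), (121, 150)], i.e. the gaps before, between, and after the coding regions.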
def get_coords(c_file):
fin=open(c_file, 'r')
coords=[]
for line in fin:
cols=line.strip().split('\t')
coords.append((int(cols[1]), int(cols[2])))
return coords
# Extracts data from a fasta sequence file. Returns two lists, the first holds the names of the seqs (excluding the '>' symbol), and the second holds the sequences
def read_fasta_lists(file):
fin = open(file, 'r')
count=0
names=[]
seqs=[]
seq=''
for line in fin:
line=line.strip()
if line and line[0] == '>': #indicates the name of the sequence
count+=1
names.append(line[1:])
if count>1:
seqs.append(seq)
seq=''
else: seq +=line
seqs.append(seq)
return names, seqs
###------------------------------------->>>>
if __name__ == "__main__":
main()
|
gpl-3.0
| 1,622,315,349,966,179,600
| 38.644628
| 252
| 0.62143
| false
| 3.164248
| false
| false
| false
|
Xicnet/radioflow-scheduler
|
project/icecast_stats/views.py
|
1
|
4776
|
import os.path
import datetime
import pytz
from django.conf import settings
from django.db.models import F
from datetime import timedelta
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.models import User
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from rest_framework import generics
from rest_framework import serializers
from timeslot.models import Program, Day, Config
from icecast_stats.models import IcecastLog, ProgramStat
from realtime_stats import StatsCollector
@login_required
def index(request):
logs = IcecastLog.objects.all()[:50]
return render_to_response(
'icecast_stats/dashboard.html',
{
'logs': logs,
'weekly_programs': Program.get_weekly(request),
},
context_instance=RequestContext(request)
)
@login_required
def programacion(request):
return redirect('/program/')
@login_required
def realtime(request):
print settings.ICECAST_URL
stats = StatsCollector(
settings.ICECAST_URL,
settings.ICECAST_USER,
settings.ICECAST_PASS,
settings.ICECAST_REALM,
settings.ICECAST_MOUNT
)
stats_data = stats.run()
return render_to_response(
'icecast_stats/realtime.html',
{
'listeners': stats_data,
},
context_instance=RequestContext(request)
)
@login_required
def chat(request):
return render_to_response(
'icecast_stats/chat.html',
{
'weekly_programs': Program.get_weekly(request),
},
context_instance=RequestContext(request)
)
# Serializers define the API representation.
class IcecastLogSerializer(serializers.ModelSerializer):
class Meta:
model = IcecastLog
class IcecastLogViewSet( generics.ListAPIView):
#@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(IcecastLogViewSet, self).dispatch(*args, **kwargs)
serializer_class = IcecastLogSerializer
def get_queryset(self):
"""
This view should return a list of Icecast log entries for
the user, as determined by the mount portion of the URL.
"""
mount = self.request.query_params.get('mount', None)
start = "%s 00:00:00" % self.request.query_params.get('start', None)
end = "%s 00:00:00" % self.request.query_params.get('end', None)
#end = datetime.date("%s 00:00:00" % self.request.query_params.get('end', None), tzinfo=pytz.UTC)
limit = self.request.query_params.get('limit', None)
if self.request.user.is_superuser:
#logs = IcecastLog.objects.all()
logs = IcecastLog.objects.filter(mount=mount)
else:
#mount = os.path.basename(User.objects.get(username=self.request.user.username).config.streamurl)
logs = IcecastLog.objects.filter(mount=mount)
if mount:
logs = logs.filter(mount=mount)
if start and end:
logs = logs.filter(datetime_start__gte=start, datetime_end__lte=end, datetime_end__gt=F('datetime_start') + timedelta(seconds=5) )
return logs[:limit]
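# Illustrative sketch (added for clarity; the URL path, mount point and dates are hypothetical):
# a request like /api/icecast-logs/?mount=/radio.ogg&start=2016-01-01&end=2016-01-07&limit=100
# filters logs for that mount whose start/end datetimes fall within the window, keeps only
# entries lasting more than five seconds, and returns at most 100 rows.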
class ProgramStatSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramStat
class ProgramStatViewSet( generics.ListAPIView):
#@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ProgramStatViewSet, self).dispatch(*args, **kwargs)
serializer_class = ProgramStatSerializer
def get_queryset(self):
"""
This view should return a list of program statistics for
the user, as determined by the mount portion of the URL.
"""
mount = self.request.query_params.get('mount', None)
start = "%s 00:00:00" % self.request.query_params.get('start', None)
end = "%s 00:00:00" % self.request.query_params.get('end', None)
limit = self.request.query_params.get('limit', None)
if self.request.user.is_superuser:
#program_stat = ProgramStat.objects.all()
program_stat = ProgramStat.objects.filter(log_entry__mount=mount)
else:
program_stat = ProgramStat.objects.filter(log_entry__mount=mount)
if mount:
program_stat = program_stat.filter(log_entry__mount=mount)
if start and end:
program_stat = program_stat.filter(log_entry__datetime_start__gte=start, log_entry__datetime_end__lte=end)
return program_stat[:limit]
|
agpl-3.0
| -1,338,487,912,704,370,200
| 31.27027
| 142
| 0.650126
| false
| 3.898776
| false
| false
| false
|
ndp-systemes/odoo-addons
|
stock_specific_inventory/__openerp__.py
|
1
|
1636
|
# -*- coding: utf8 -*-
#
# Copyright (C) 2015 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
{
'name': 'Stock Specific Inventory',
'version': '0.1',
'author': 'NDP Systèmes',
'maintainer': 'NDP Systèmes',
'category': 'Warehouse',
'depends': ['stock'],
'description': """
Stock Specific Inventory
==========================
This module adds the possibility to make inventories on a product selection.
It also improves the view of the last inventories per product. Products can be selected directly from this view and a new
inventory can then be made on these products.
""",
'website': 'http://www.ndp-systemes.fr',
'data': [
'security/ir.model.access.csv',
'views/stock_specific_product_for_inventory.xml'
],
'demo': [
'test_stock_specific_inventory_demo.xml'
],
'test': [],
'installable': True,
'auto_install': False,
'license': 'AGPL-3',
'application': False,
}
|
agpl-3.0
| 438,542,075,688,769,860
| 33.020833
| 117
| 0.660747
| false
| 3.711364
| false
| false
| false
|
PaddoInWonderland/PaddoCogs
|
schmeckles/schmeckles.py
|
1
|
2347
|
import re
class Schmeckles:
def __init__(self, bot):
self.bot = bot
self.p = re.compile('([^\n\.\,\r\d-]{0,30})(-?[\d|,]{0,300}\.{0,1}\d{1,300} schmeckle[\w]{0,80})([^\n\.\,\r\d-]{0,30})', re.IGNORECASE)
async def schmeckle2usd(self, schmeckle):
"""1 Schmeckle = $148 USD
https://www.reddit.com/r/IAmA/comments/202owt/we_are_dan_harmon_and_justin_roiland_creators_of/cfzfv79"""
return schmeckle * 148.0
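# Worked example (added for clarity): awaiting self.schmeckle2usd(2.5) returns 2.5 * 148.0 = 370.0 USD.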
async def schmeckle2eur(self, schmeckle):
return schmeckle * 139.25 # latest USDEUR value
async def schmeckle2yen(self, schmeckle):
return schmeckle * 139.25 # latest USDYEN value
async def schmeckle2rub(self, schmeckle):
return schmeckle * 139.25 # latest USDRUB value
async def searchForSchmeckles(self, content):
if any([x in content.lower() for x in ['?', 'how much', 'what is', 'how many', 'euro', 'usd', 'dollars', 'dollar', 'euros']]):
return self.p.search(content)
return None
async def getSchmeckles(self, content):
get_schmeckles = await self.searchForSchmeckles(content)
if get_schmeckles:
match = get_schmeckles.groups()
euro = any([x in match[-1].lower() for x in ['eur', 'euro', 'euros']])
dollar = any([x in match[-1].lower() for x in ['usd', 'dollar', 'dollars']])
if euro and not dollar:
value = await self.schmeckle2eur(float(match[1].split()[0])), 'EUR', match[1].split()[0]
elif dollar and not euro:
value = await self.schmeckle2usd(float(match[1].split()[0])), 'USD', match[1].split()[0]
elif not dollar and not euro:
value = await self.schmeckle2usd(float(match[1].split()[0])), 'USD', match[1].split()[0]
return value
return None
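# Illustrative example (added for clarity; the message text is hypothetical): for the content
# 'How much is 2 schmeckles in USD?', searchForSchmeckles matches '2 schmeckles', no euro
# keyword is present, so getSchmeckles returns (296.0, 'USD', '2').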
async def _on_message(self, message):
content = message.content
author = message.author
channel = message.channel
if author.id != self.bot.user.id:
schmeckles = await self.getSchmeckles(content)
if schmeckles:
await self.bot.send_message(channel, '{0[2]} SHM is about {0[0]:.2f} {0[1]}'.format(schmeckles))
def setup(bot):
cog = Schmeckles(bot)
bot.add_listener(cog._on_message, "on_message")
bot.add_cog(cog)
|
gpl-3.0
| -4,986,179,851,531,409,000
| 40.910714
| 143
| 0.588411
| false
| 2.970886
| false
| false
| false
|
joshua-cogliati-inl/raven
|
framework/Optimizers/GradientBasedOptimizer.py
|
1
|
52237
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Gradient Based Optimization strategy
Created on June 16, 2016
@author: chenj
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import sys
import os
import copy
import abc
import numpy as np
from numpy import linalg as LA
from sklearn.neighbors import NearestNeighbors
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .Optimizer import Optimizer
from Assembler import Assembler
from utils import utils,cached_ndarray,mathUtils
#Internal Modules End--------------------------------------------------------------------------------
class GradientBasedOptimizer(Optimizer):
"""
This is the base class for gradient-based optimizers. The following methods need to be overridden by all derived classes:
self.localLocalInputAndChecks(self, xmlNode,paraminput)
self.localLocalInitialize(self, solutionExport)
self.localLocalGenerateInput(self,model,oldInput)
self.localEvaluateGradient(self, optVarsValues, gradient = None)
"""
##########################
# Initialization Methods #
##########################
def __init__(self):
"""
Default Constructor that will initialize member variables with reasonable
defaults or empty lists/dictionaries where applicable.
@ In, None
@ Out, None
"""
Optimizer.__init__(self)
self.ableToHandelFailedRuns = True # is this optimizer able to handle failed runs?
self.constraintHandlingPara = {} # Dict containing parameters for parameters related to constraints handling
self.gradientNormTolerance = 1.e-3 # tolerance on the L2 norm of the gradient
self.gradDict = {} # Dict containing information for gradient related operations
self.gradDict['numIterForAve' ] = 1 # Number of iterations for gradient estimation averaging, denoising number.
self.gradDict['pertNeeded' ] = 1 # Number of perturbations needed to evaluate the gradient (globally, considering denoising); for example, pertNeeded = dimension * 1 (if not central difference) * denoise in Finite Difference
self.paramDict['pertSingleGrad'] = 1 # Number of perturbations needed to evaluate a single denoised gradient, e.g., 1 for SPSA, dim for FD without central difference, 2*dim for central difference
self.gradDict['pertPoints' ] = {} # Dict containing normalized inputs sent to model for gradient evaluation
self.readyVarsUpdate = {} # Bool variable indicating the finish of gradient evaluation and the ready to update decision variables
self.counter['perturbation' ] = {} # Counter for the perturbation performed.
self.counter['gradientHistory' ] = {} # In this dict we store the gradient value (versor) for current and previous iterations {'trajectoryID':[{},{}]}
self.counter['gradNormHistory' ] = {} # In this dict we store the gradient norm for current and previous iterations {'trajectoryID':[float,float]}
self.counter['varsUpdate' ] = {}
self.counter['solutionUpdate' ] = {}
self.counter['lastStepSize' ] = {} # counter to track the last step size taken, by trajectory
# line search parameters used in dcsrch function inside minpack2 from Scipy
self.counter['iSave'] = {} # integer work array of dimension 2 for line search in scipy minpack2
# isave(1): whether a minimizer has been bracketed in an interval with endpoints
# isave(2): whether a lower function value has been obtained
self.counter['dSave'] = {} # double precision work array of dimension 13 for line search; this array stores the previous line search results as:
# dsave(1): derivative of the problem at previous step
# dsave(2) nonnegative tolerance for the sufficient decrease condition on gradient calculation
# dsave(3) derivative at the best step on variables
# dsave(4) derivative at best residuals;
# dsave(5) value of the problem at step
# dsave(6) value of the problem at best step
# dsave(7) value of the problem at second best step
# dsave(8) best step obtained so far, endpoint of the interval that contains the minimizer.
# dsave(9) second endpoint of the interval that contains the minimizer.
# dsave(10) minimum step in line search
# dsave(11) maximum step in line search
# dsave(12) range of the step
# dsave(13) range to decide if a bisection step is needed
self.counter['task'] = {} # byte string for the task in line search; the initial entry must be set to 'START', and each line search exits with convergence, a warning or an error
# Conjugate gradient parameters
self.counter['gtol'] = {} # specifies a nonnegative tolerance for the curvature condition in conjugate gradient calculation
self.counter['xk'] = {} # ndarray, best optimal point as an array for conjugate gradient calculation
self.counter['gfk'] = {} # ndarray, gradient value as an array for current point in searching the strong wolfe condition in conjugate calculation
self.counter['pk'] = {} # ndarray, search direction in searching the strong wolfe condition in conjugate calculation
self.counter['newFVal'] = {} # float, function value for a new optimal point
self.counter['oldFVal'] = {} # float, function value for last optimal point
self.counter['oldOldFVal'] = {} # float, function value for penultimate optimal point
self.counter['oldGradK'] = {} # ndarray, gradient value as an array for current best optimal point
self.counter['gNorm'] = {} # float, norm of the current gradient
self.counter['deltaK'] = {} # float, inner product of the current gradient for calculation of the Polak–Ribière stepsize
self.counter['derPhi0'] = {} # float, objective function derivative at each beginning of the line search
self.counter['alpha'] = {} # float, step size for the conjugate gradient method in the current direction
self.resampleSwitch = True # bool, resample switch
self.resample = {} # bool by trajectory; if True, the next submitted point is a resample of the opt point with newly perturbed gradient points
self.convergeTraj = {}
self.convergenceProgress = {} # tracks the convergence progress, by trajectory
self.trajectoriesKilled = {} # by trajectory, stores the trajectories killed, so that there's no mutual destruction
self.recommendToGain = {} # recommended action to take in next step, by trajectory
self.gainGrowthFactor = 2. # max step growth factor
self.gainShrinkFactor = 2. # max step shrinking factor
self.optPointIndices = [] # in this list we store the indices that correspond to the opt point
self.perturbationIndices = [] # in this list we store the indices that correspond to the perturbation.
self.useCentralDiff = False # whether to use central differencing
self.useGradHist = False # whether to use Gradient history
# REWORK 2018-10 for simultaneous point-and-gradient evaluations
self.realizations = {} # by trajectory, stores the results obtained from the jobs running, see setupNewStorage for structure
# register metadata
self.addMetaKeys(['trajID','varsUpdate','prefix'])
def localInputAndChecks(self, xmlNode, paramInput):
"""
Method to read the portion of the xml input that belongs to all gradient based optimizer only
and initialize some stuff based on the inputs got
@ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
@ In, paramInput, InputData.ParameterInput, the parsed parameters
@ Out, None
"""
for child in paramInput.subparts:
if child.getName() == "initialization":
for grandchild in child.subparts:
tag = grandchild.getName()
if tag == "resample":
self.resampleSwitch = grandchild.value
if child.getName() == "convergence":
for grandchild in child.subparts:
tag = grandchild.getName()
if tag == "gradientThreshold":
self.gradientNormTolerance = grandchild.value
elif tag == "gainGrowthFactor":
self.gainGrowthFactor = grandchild.value
self.raiseADebug('Gain growth factor is set at',self.gainGrowthFactor)
elif tag == "gainShrinkFactor":
self.gainShrinkFactor = grandchild.value
self.raiseADebug('Gain shrink factor is set at',self.gainShrinkFactor)
elif tag == "centralDifference":
self.useCentralDiff = grandchild.value
elif tag == "useGradientHistory":
self.useGradHist = grandchild.value
self.gradDict['numIterForAve'] = int(self.paramDict.get('numGradAvgIterations', 1))
def localInitialize(self,solutionExport):
"""
Method to initialize settings that belongs to all gradient based optimizer
@ In, solutionExport, DataObject, a PointSet to hold the solution
@ Out, None
"""
for traj in self.optTraj:
self.gradDict['pertPoints'][traj] = {}
self.counter['perturbation'][traj] = 0
self.counter['varsUpdate'][traj] = 0
self.counter['solutionUpdate'][traj] = 0
self.counter['gradientHistory'][traj] = [{},{}]
self.counter['lastStepSize'][traj] = [{},{}]
self.counter['gradNormHistory'][traj] = [0.0,0.0]
self.counter['persistence'][traj] = 0
self.counter['iSave'][traj] = np.zeros((2,), np.intc)
self.counter['dSave'][traj] = np.zeros((13,), float)
self.counter['task'][traj] = b'START'
self.counter['gtol'][traj] = 1e-08
self.counter['xk'][traj] = None
self.counter['gfk'][traj] = None
self.counter['pk'][traj] = None
self.counter['newFVal'][traj] = None
self.counter['oldFVal'][traj] = None
self.counter['oldOldFVal'][traj] = None
self.counter['oldGradK'][traj] = None
self.counter['gNorm'][traj] = None
self.counter['deltaK'][traj] = None
self.counter['derPhi0'][traj] = None
self.counter['alpha'][traj] = None
self.resample[traj] = False
self.optVarsHist[traj] = {}
self.readyVarsUpdate[traj] = False
self.convergeTraj[traj] = False
self.status[traj] = {'process':'submitting new opt points', 'reason':'just started'}
self.counter['recentOptHist'][traj] = [{},{}]
self.trajectoriesKilled[traj] = []
# end job runnable equal to the number of trajectories
self._endJobRunnable = len(self.optTraj)
# initialize index lists
## opt point evaluations are indices 0 through number of re-evaluation points
self.optPointIndices = list(range(0,self.gradDict['numIterForAve']+1))
## perturbation evaluations are indices starting at the end of optPoint and going through all the rest
self.perturbationIndices = list(range(self.gradDict['numIterForAve'],self.gradDict['numIterForAve']*(self.paramDict['pertSingleGrad']+1)))
#specializing the self.localLocalInitialize()
self.localLocalInitialize(solutionExport=solutionExport)
@abc.abstractmethod
def localLocalInitialize(self, solutionExport):
"""
Method to initialize local settings.
@ In, solutionExport, DataObject, a PointSet to hold the solution
@ Out, None
"""
pass
###############
# Run Methods #
###############
def evaluateGradient(self, traj):
"""
Method to evaluate gradient based on perturbed points and model evaluations.
@ In, traj, int, the trajectory id
@ Out, gradient, dict, dictionary containing gradient estimation. gradient should have the form {varName: gradEstimation}
"""
# let the local do the main gradient evaluation
gradient = self.localEvaluateGradient(traj)
# we intend for gradient to give direction only, so get the versor
## NOTE this assumes gradient vectors are 0 or 1 dimensional, not 2 or more! (vectors or scalars, not matrices)
gradientNorm = self.calculateMultivectorMagnitude(gradient.values())
# store this norm, infinite or not
self.counter['gradNormHistory'][traj][0] = gradientNorm
#fix inf
if gradientNorm == np.inf:
# if there are infinites, then only infinites should remain, and they are +-1
for v,var in enumerate(gradient.keys()):
# first, set all non-infinites to 0, since they can't compete with infinites
gradient[var][-np.inf < gradient[var] < np.inf] = 0.0
# set +- infinites to +- 1 (arbitrary) since they're all equally important
gradient[var][gradient[var] == -np.inf] = -1.0
gradient[var][gradient[var] == np.inf] = 1.0
# set up the new grad norm
gradientNorm = self.calculateMultivectorMagnitude(gradient.values())
# normalize gradient (if norm is zero, skip this)
if gradientNorm != 0.0:
for var in gradient.keys():
gradient[var] = gradient[var]/gradientNorm
# if float coming in, make it a float going out
if len(gradient[var])==1:
gradient[var] = float(gradient[var])
# store gradient
try:
self.counter['gradientHistory'][traj][1] = self.counter['gradientHistory'][traj][0]
self.counter['lastStepSize'][traj][1] = self.counter['lastStepSize'][traj][0]
except IndexError:
pass # don't have a history on the first pass
self.counter['gradientHistory'][traj][0] = gradient
return gradient
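# Illustrative sketch (added for clarity; the variables and values are hypothetical): a local
# gradient estimate of {'x': 3.0, 'y': 4.0} has multivector magnitude 5.0, so the stored
# gradient versor becomes {'x': 0.6, 'y': 0.8}; only the direction is kept here, and the step
# size is chosen separately.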
def finalizeSampler(self, failedRuns):
"""
Method called at the end of the Step when no more samples will be taken. Closes out optimizer.
@ In, failedRuns, list, list of JobHandler.ExternalRunner objects
@ Out, None
"""
Optimizer.handleFailedRuns(self, failedRuns)
# get the most optimal point among the trajectories
bestValue = None
bestTraj = None
for traj in self.counter['recentOptHist'].keys():
value = self.counter['recentOptHist'][traj][0][self.objVar]
self.raiseADebug('For trajectory "{}" the best value was'.format(traj+1),value)
if bestTraj is None:
bestTraj = traj
bestValue = value
continue
if self.checkIfBetter(value,bestValue):
bestTraj = traj
bestValue = value
# now have the best trajectory, so write solution export
bestPoint = self.denormalizeData(self.counter['recentOptHist'][bestTraj][0])
self.raiseADebug('The best overall trajectory ending was for trajectory "{}".'.format(bestTraj+1))
self.raiseADebug(' The optimal location is at:')
for v in self.getOptVars():
self.raiseADebug(' {} = {}'.format(v,bestPoint[v]))
self.raiseADebug(' The objective value there: {}'.format(bestValue))
self.raiseADebug('====================')
self.raiseADebug('| END OPTIMIZATION |')
self.raiseADebug('====================')
# _always_ re-add the last point to the solution export, but use a new varsUpdate value
overwrite = {'varsUpdate': self.counter['varsUpdate'][traj]}
self.writeToSolutionExport(bestTraj, self.normalizeData(bestPoint), True, overwrite=overwrite)
def localEvaluateGradient(self, optVarsValues, gradient = None):
"""
Local method to evaluate gradient.
@ In, optVarsValues, dict, dictionary containing perturbed points.
optVarsValues should have the form {pertIndex: {varName: [varValue1 varValue2]}}
Therefore, each optVarsValues[pertIndex] should return a dict of variable values
that is sufficient for gradient evaluation for at least one variable
(depending on specific optimization algorithm)
@ In, gradient, dict, optional, dictionary containing gradient estimation by the caller.
gradient should have the form {varName: gradEstimation}
@ Out, gradient, dict, dictionary containing gradient estimation. gradient should have the form {varName: gradEstimation}
"""
return gradient
def localFinalizeActualSampling(self,jobObject,model,myInput):
"""
Overwrite only if you need something special at the end of each run....
This function is used by optimizers that need to collect information from the just ended run
@ In, jobObject, instance, an instance of a Runner
@ In, model, Model, instance of a RAVEN model
@ In, myInput, list, the generating input
@ Out, None
"""
# collect finished jobs
prefix = jobObject.getMetadata()['prefix']
traj, step, identifier = [int(x) for x in prefix.split('_')] # FIXME This isn't generic for any prefixing system
self.raiseADebug('Collected sample "{}"'.format(prefix))
failed = jobObject.getReturnCode() != 0
if failed:
self.raiseADebug(' ... sample "{}" FAILED. Cutting step and re-queueing.'.format(prefix))
# since run failed, cut the step and requeue
## cancel any further runs at this point
self.cancelJobs([self._createEvaluationIdentifier(traj, self.counter['varsUpdate'][traj], i) for i in range(1, self.perturbationIndices[-1]+1)])
self.recommendToGain[traj] = 'cut'
grad = self.counter['gradientHistory'][traj][0]
new = self._newOptPointAdd(grad, traj)
if new is not None:
self._createPerturbationPoints(traj, new)
else:
self.raiseADebug('After failing a point, trajectory {} is not adding new points!'.format(traj))
self._setupNewStorage(traj)
else:
# update self.realizations dictionary for the right trajectory
# category: is this point an "opt" or a "grad" evaluations?
# number is which variable is being perturbed, ie which dimension 0 indexed
category, number, _, cdId = self._identifierToLabel(identifier)
# done is whether the realization finished
# index: where is it in the dataObject
# find index of sample in the target evaluation data object
done, index = self._checkModelFinish(str(traj), str(step), str(identifier))
# sanity check
if not done:
self.raiseAnError(RuntimeError,'Trying to collect "{}" but identifies as not done!'.format(prefix))
# store index for future use
# number is the varID
number = number + (cdId * len(self.fullOptVars))
self.realizations[traj]['collect'][category][number].append(index)
# check if any further action needed because we have all the points we need for opt or grad
if len(self.realizations[traj]['collect'][category][number]) == self.realizations[traj]['need']:
# get the output space (input space included as well)
outputs = self._averageCollectedOutputs(self.realizations[traj]['collect'][category][number])
# store denoised results
self.realizations[traj]['denoised'][category][number] = outputs
# if we just finished "opt", check some acceptance and convergence checking
if category == 'opt':
converged = self._finalizeOptimalCandidate(traj,outputs)
else:
converged = False
# if both opts and grads are now done, then we can do an evaluation
## note that by now we've ALREADY accepted the point; if it was rejected, it would have been reset by now.
optDone = bool(len(self.realizations[traj]['denoised']['opt'][0]))
gradDone = all( len(self.realizations[traj]['denoised']['grad'][i]) for i in range(self.paramDict['pertSingleGrad']))
if not converged and optDone and gradDone:
optCandidate = self.normalizeData(self.realizations[traj]['denoised']['opt'][0])
# update solution export
## only write here if we want to write on EVERY optimizer iteration (each new optimal point)
if self.writeSolnExportOn == 'every':
self.writeToSolutionExport(traj, optCandidate, self.realizations[traj]['accepted'])
# whether we wrote to solution export or not, update the counter
self.counter['solutionUpdate'][traj] += 1
self.counter['varsUpdate'][traj] += 1
## since accepted, update history
try:
self.counter['recentOptHist'][traj][1] = copy.deepcopy(self.counter['recentOptHist'][traj][0])
except KeyError:
# this means we don't have an entry for this trajectory yet, so don't copy anything
pass
# store realization of most recent developments
self.counter['recentOptHist'][traj][0] = optCandidate
# find the new gradient for this trajectory at the new opt point
grad = self.evaluateGradient(traj)
# get a new candidate
new = self._newOptPointAdd(grad, traj)
if new is not None:
# add new gradient points
self._createPerturbationPoints(traj, new)
# reset storage
self._setupNewStorage(traj)
def localGenerateInput(self,model,oldInput):
"""
Method to generate input for model to run
@ In, model, model instance, it is the instance of a RAVEN model
@ In, oldInput, list, a list of the original needed inputs for the model (e.g. list of files, etc. etc)
@ Out, None
"""
self.readyVarsUpdate = {traj:False for traj in self.optTrajLive}
def localStillReady(self,ready, convergence = False): #,lastOutput=None
"""
Determines if optimizer is ready to provide another input. If not, and if jobHandler is finished, this will end sampling.
@ In, ready, bool, variable indicating whether the caller is prepared for another input.
@ In, convergence, bool, optional, variable indicating whether the convergence criteria has been met.
@ Out, ready, bool, boolean variable indicating whether the caller is prepared for another input.
"""
#let this be handled at the local subclass level for now
return ready
###################
# Utility Methods #
###################
def _averageCollectedOutputs(self,collection):
"""
Averages the results of several realizations that are denoising evaluations of a single point
@ In, collection, list, list of indices of evaluations for a single point
@ Out, outputs, dict, dictionary of average values
"""
# make a place to store distinct evaluation values
outputs = dict((var,np.zeros(self.gradDict['numIterForAve'],dtype=object))
for var in self.solutionExport.getVars('output')
if var in self.mdlEvalHist.getVars('output'))
for i, index in enumerate(collection):
vals = self.mdlEvalHist.realization(index=index)
# store the inputs for later on first iteration
if i == 0:
inputs = dict((var,vals[var]) for var in self.getOptVars())
for var in outputs.keys():
# store values; cover vector variables as well as scalars, as well as vectors that should be scalars
if hasattr(vals[var],'__len__') and len(vals[var]) == 1:
outputs[var][i] = float(vals[var])
else:
outputs[var][i] = vals[var]
# average the collected outputs for the opt point
for var,vals in outputs.items():
outputs[var] = vals.mean()
outputs.update(inputs)
return outputs
def calculateMultivectorMagnitude(self,values):
"""
Calculates the magnitude of vector "values", where values might be a combination of scalars and vectors (but not matrices [yet]).
Calculates the magnitude as if "values" were flattened into a 1d array.
@ In, values, list, values for which the magnitude will be calculated
@ Out, mag, float, magnitude
"""
# use np.linalg.norm (Frobenius norm) to calculate magnitude
## pre-normalise vectors, this is mathematically equivalent to flattening the vector first
## NOTE this assumes gradient vectors are 0 or 1 dimensional, not 2 or more! (vectors or scalars, not matrices)
# TODO this could be sped up if we could avoid calling np.atleast_1d twice, but net slower if we loop first
preMag = [np.linalg.norm(val) if len(np.atleast_1d(val))>1 else np.atleast_1d(val)[0] for val in values]
## then get the magnitude of the result, and return it
return np.linalg.norm(preMag)
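# Doctest-style sketch (added for clarity, not executed; the values are hypothetical):
# calculateMultivectorMagnitude([3.0, np.array([2.0, 6.0])]) pre-normalizes the vector entry to
# sqrt(40) and returns sqrt(3.0**2 + 40) = 7.0, the same as flattening to [3, 2, 6] first.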
def checkConvergence(self):
"""
Method to check whether the convergence criteria has been met.
@ In, None
@ Out, convergence, list, list of bool variable indicating whether the convergence criteria has been met for each trajectory.
"""
convergence = True
for traj in self.optTraj:
if not self.convergeTraj[traj]:
convergence = False
break
return convergence
def _checkModelFinish(self, traj, updateKey, evalID):
"""
Determines if the Model has finished running an input and returned the output
@ In, traj, int, traj on which the input is being checked
@ In, updateKey, int, the id of variable update on which the input is being checked
@ In, evalID, int or string, the id of the perturbation (int) or 'v' if it is a variable update (string)
@ Out, _checkModelFinish, tuple(bool, int), first entry indicates whether the Model has finished the evaluation
identified by traj+updateKey+evalID; second entry is the index of that realization in the data object (-1 if not found)
"""
if len(self.mdlEvalHist) == 0:
return (False,-1)
lookFor = '{}_{}_{}'.format(traj,updateKey,evalID)
index,match = self.mdlEvalHist.realization(matchDict = {'prefix':lookFor})
# if no match, return False
if match is None:
return False,-1
# otherwise, return index of match
return True, index
def _createEvaluationIdentifier(self,trajID,iterID,evalType):
"""
Create evaluation identifier
@ In, trajID, integer, trajectory identifier
@ In, iterID, integer, iteration number (identifier)
@ In, evalType, integer or string, evaluation type (v for variable update; otherwise id for gradient evaluation)
@ Out, identifier, string, the evaluation identifier
"""
identifier = str(trajID) + '_' + str(iterID) + '_' + str(evalType)
return identifier
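# Usage example (hypothetical arguments): _createEvaluationIdentifier(0, 3, 'v')
# returns the string '0_3_v', i.e. trajectory 0, iteration 3, variable update.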
def _finalizeOptimalCandidate(self,traj,outputs):
"""
Once all the data for an opt point has been collected:
- determine convergence
- determine redundancy
- determine acceptability
- queue new points (if rejected)
@ In, traj, int, the trajectory we are currently considering
@ In, outputs, dict, denoised new optimal point
@ Out, converged, bool, if True then indicates convergence has been reached
"""
# check convergence and check if new point is accepted (better than old point)
if self.resample[traj]:
accepted = True
else:
accepted = self._updateConvergenceVector(traj, self.counter['solutionUpdate'][traj], outputs)
# if converged, we can wrap up this trajectory
if self.convergeTraj[traj]:
# end any excess gradient evaluation jobs
self.cancelJobs([self._createEvaluationIdentifier(traj,self.counter['varsUpdate'][traj],i) for i in self.perturbationIndices])
return True #converged
# if accepted, record how the point was accepted; if rejected, scrap this run and set up a new one
if accepted:
# store acceptance for later
if self.resample[traj]:
self.realizations[traj]['accepted'] = 'resample'
self.raiseADebug('This is a resample point')
else:
self.realizations[traj]['accepted'] = 'accepted'
self.resample[traj] = False
else:
self.resample[traj] = self.checkResampleOption(traj)
# cancel all gradient evaluations for the rejected candidate immediately
self.cancelJobs([self._createEvaluationIdentifier(traj,self.counter['varsUpdate'][traj],i) for i in self.perturbationIndices])
# update solution export
optCandidate = self.normalizeData(self.realizations[traj]['denoised']['opt'][0])
## only write here if we want to write on EVERY optimizer iteration (each new optimal point)
if self.writeSolnExportOn == 'every':
self.writeToSolutionExport(traj, optCandidate, self.realizations[traj]['accepted'])
# whether we wrote to solution export or not, update the counter
self.counter['solutionUpdate'][traj] += 1
self.counter['varsUpdate'][traj] += 1
# new point setup
## keep the old grad point
grad = self.counter['gradientHistory'][traj][0]
new = self._newOptPointAdd(grad, traj,resample = self.resample[traj])
if new is not None:
self._createPerturbationPoints(traj, new, resample = self.resample[traj])
self._setupNewStorage(traj)
return False #not converged
def fractionalStepChangeFromGradHistory(self,traj):
"""
Uses the dot product between two successive gradients to determine a fractional multiplier for the step size.
For instance, if the dot product is 1.0, we're consistently moving in a straight line, so increase step size.
If the dot product is -1.0, we've gone forward and then backward again, so cut the step size down before moving again.
If the dot product is 0.0, we're moving orthogonally, so don't change step size just yet.
@ In, traj, int, the trajectory for whom we are creating a fractional step size
@ Out, frac, float, the fraction by which to multiply the existing step size
"""
# if we have a recommendation from elsewhere, take that first
if traj in self.recommendToGain.keys():
recommend = self.recommendToGain.pop(traj)
if recommend == 'cut':
frac = 1./self.gainShrinkFactor
elif recommend == 'grow':
frac = self.gainGrowthFactor
else:
self.raiseAnError(RuntimeError,'unrecognized gain recommendation:',recommend)
self.raiseADebug('Based on recommendation "{}", step size multiplier is: {}'.format(recommend,frac))
return frac
# otherwise, no recommendation for this trajectory, so move on
#if we don't have two evaluated gradients, just return 1.0
grad1 = self.counter['gradientHistory'][traj][1]
if len(grad1) == 0: # aka if grad1 is empty dict
return 1.0
#otherwise, do the dot product between the last two gradients
grad0 = self.counter['gradientHistory'][traj][0]
# scalar product
## NOTE assumes scalar or vector, not matrix, values
prod = np.sum( [np.sum(grad0[key]*grad1[key]) for key in grad0.keys()] )
#rescale from [-1, 1] to [1/g, g]
if prod > 0:
frac = self.gainGrowthFactor**prod
else:
frac = self.gainShrinkFactor**prod
self.raiseADebug('Based on gradient history, step size multiplier is:',frac)
return frac
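# Worked example of the rescaling above, assuming (hypothetically) that the
# stored gradients are unit vectors so prod lies in [-1, 1], and that
# gainGrowthFactor and gainShrinkFactor are both 2.0:
#   prod = +1.0  ->  frac = 2.0**(+1.0) = 2.0   (straight line, grow the step)
#   prod =  0.0  ->  frac = 2.0**( 0.0) = 1.0   (orthogonal move, keep the step)
#   prod = -1.0  ->  frac = 2.0**(-1.0) = 0.5   (reversal, cut the step)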
def _getJobsByID(self,traj):
"""
Overwrite only if you need something special at the end of each run....
This function is used by optimizers that need to collect information from the just ended run
@ In, traj, int, ID of the trajectory for whom we collect jobs
@ Out, solutionExportUpdatedFlag, bool, True if the solutionExport needs updating
@ Out, solutionIndeces, list(int), location of updates within the full targetEvaluation data object
"""
solutionUpdateList = []
solutionIndeces = []
# get all the opt point results (these are the multiple evaluations of the opt point)
for i in range(self.gradDict['numIterForAve']):
identifier = i
solutionExportUpdatedFlag, index = self._checkModelFinish(traj, self.counter['solutionUpdate'][traj], str(identifier))
solutionUpdateList.append(solutionExportUpdatedFlag)
solutionIndeces.append(index)
solutionExportUpdatedFlag = all(solutionUpdateList)
return solutionExportUpdatedFlag,solutionIndeces
def getQueuedPoint(self,traj,denorm=True):
"""
Pops the first point off the submission queue (or errors if empty). By default denormalizes the point before returning.
@ In, traj, int, the trajectory from whose queue we should obtain an entry
@ In, denorm, bool, optional, if True the input data will be denormalized before returning
@ Out, prefix, str, evaluation identifier of the form #_#_# (trajectory_varsUpdate_evalNumber)
@ Out, point, dict, the queued input point as {var:val}
"""
try:
entry = self.submissionQueue[traj].popleft()
except IndexError:
self.raiseAnError(RuntimeError,'Tried to get a point from submission queue of trajectory "{}" but it is empty!'.format(traj))
prefix = entry['prefix']
point = entry['inputs']
if denorm:
point = self.denormalizeData(point)
return prefix,point
def _identifierToLabel(self,identifier):
"""
Maps identifiers (eg. the evaluation number within prefix = trajectory_step_identifier) to labels
@ In, identifier, int, number of evaluation within trajectory and step
@ Out, label, tuple, (category, varId, denoId, cdId): category is 'grad' or 'opt', varId is the index of the perturbed
variable, denoId is the denoising evaluation index, and cdId is the side of the central difference (always 0 for
'opt' points or when central differencing is not used)
"""
if identifier in self.perturbationIndices:
category = 'grad'
if self.paramDict['pertSingleGrad'] == 1:
# no need to calculate the pertPerVar if pertSingleGrad is 1
pertPerVar = 1
else:
pertPerVar = self.paramDict['pertSingleGrad'] // (1+self.useCentralDiff)
varId = (identifier - self.gradDict['numIterForAve']) % pertPerVar
denoId = (identifier - self.gradDict['numIterForAve'])// self.paramDict['pertSingleGrad']
# for cdId, 0 is the first side and 1 is the second side of the central difference
if len(self.fullOptVars) == 1:
#expect 0 or 1 for cdID, but % len(self.fullOptVars) will always be 0 if len(self.fullOptVars)=1
cdId = (identifier - self.gradDict['numIterForAve']) % self.paramDict['pertSingleGrad']
else:
cdId = ((identifier - self.gradDict['numIterForAve'])// pertPerVar) % len(self.fullOptVars)
if not self.useCentralDiff:
cdId = 0
else:
category = 'opt'
varId = 0
denoId = identifier
cdId = 0
return category, varId, denoId, cdId
def localCheckConstraint(self, optVars, satisfaction = True):
"""
Local method to check whether a set of decision variables satisfy the constraint or not
@ In, optVars, dict, dictionary containing the value of decision variables to be checked, in form of {varName: varValue}
@ In, satisfaction, bool, optional, variable indicating how the caller determines the constraint satisfaction at the point optVars
@ Out, satisfaction, bool, variable indicating the satisfaction of constraints at the point optVars
"""
return satisfaction
def proposeNewPoint(self,traj,point):
"""
See base class. Used to set next recommended point to use for algorithm, overriding the gradient descent.
@ In, traj, int, trajectory who gets proposed point
@ In, point, dict, input space as dictionary {var:val}
@ Out, None
"""
Optimizer.proposeNewPoint(self,traj,point)
self.counter['varsUpdate'][traj] += 1 #usually done when evaluating gradient, but we're bypassing that
self.queueUpOptPointRuns(traj,self.recommendedOptPoint[traj])
def queueUpOptPointRuns(self,traj,point):
"""
Establishes a queue of runs, all on the point currently stored in "point", to satisfy stochastic denoising.
@ In, traj, int, the trajectory who needs the queue
@ In, point, dict, input space as {var:val} NORMALIZED
@ Out, None
"""
# TODO sanity check, this could be removed for efficiency later
for i in range(self.gradDict['numIterForAve']):
#entries into the queue are as {'inputs':{var:val}, 'prefix':runid} where runid is <traj>_<varUpdate>_<evalNumber> as 0_0_2
nPoint = {'inputs':copy.deepcopy(point)} #deepcopy to prevent simultaneous alteration
nPoint['prefix'] = self._createEvaluationIdentifier(traj,self.counter['varsUpdate'][traj],i) # from 0 to self.gradDict['numIterForAve'] are opt point evals
# this submission queue only holds the denoising evaluations of the opt point
self.submissionQueue[traj].append(nPoint)
def _removeRedundantTraj(self, trajToRemove, currentInput):
"""
Local method to remove a redundant trajectory (one that is following another trajectory)
@ In, trajToRemove, int, identifier of the trajectory to remove
@ In, currentInput, dict, the most recent input point {var:val} on trajectory trajToRemove
@ Out, removed, bool, if True then trajectory was halted
"""
# TODO replace this with a kdtree search
removeFlag = False
def getRemoved(trajThatSurvived, fullList=None):
"""
Collect list of all the trajectories removed by this one, or removed by trajectories removed by this one, and so on (recursively)
@ In, trajThatSurvived, int, surviving trajectory that has potentially removed others
@ In, fullList, list, optional, if included is the partial list to add to
@ Out, fullList, list, list of all traj removed (explicitly or implicitly) by this one
"""
if fullList is None:
fullList = []
removed = self.trajectoriesKilled[trajThatSurvived]
fullList += removed
for rm in removed:
fullList = getRemoved(rm, fullList)
return fullList
#end function definition
notEligibleToRemove = [trajToRemove] + getRemoved(trajToRemove)
# determine if "trajToRemove" should be terminated because it is following "traj"
for traj in self.optTraj:
#don't consider removal if comparing against itself,
# or a trajectory removed by this one, or a trajectory removed by a trajectory removed by this one (recursive)
# -> this prevents mutual destruction cases
if traj not in notEligibleToRemove:
#FIXME this can be quite an expensive operation, looping through each other trajectory
for updateKey in self.optVarsHist[traj].keys():
inp = self.optVarsHist[traj][updateKey] #FIXME deepcopy needed? Used to be present, but removed for now.
if len(inp) < 1: #empty
continue
dist = self.calculateMultivectorMagnitude( [inp[var] - currentInput[var] for var in self.getOptVars()] )
if dist < self.thresholdTrajRemoval:
self.raiseADebug('Halting trajectory "{}" because it is following trajectory "{}"'.format(trajToRemove,traj))
# cancel existing jobs for trajectory
self.cancelJobs([self._createEvaluationIdentifier(traj, self.counter['varsUpdate'][traj]-1, i) for i in self.perturbationIndices])
self.trajectoriesKilled[traj].append(trajToRemove)
#TODO the trajectory to remove should be chosen more carefully someday, for example, the one that has the smallest steps or lower loss value currently
removeFlag = True
break
if removeFlag:
break
if removeFlag:
for trajInd, tr in enumerate(self.optTrajLive):
if tr == trajToRemove:
self.optTrajLive.pop(trajInd)
self.status[trajToRemove] = {'process':'following traj '+str(traj),'reason':'removed as redundant'}
break
return True
else:
return False
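# Worked example of the proximity test above, assuming (hypothetically) two
# normalized points in a 2-variable input space and thresholdTrajRemoval = 0.05:
#   inp          = {'x': 0.52, 'y': 0.51}
#   currentInput = {'x': 0.50, 'y': 0.50}
#   dist = sqrt(0.02**2 + 0.01**2) ~= 0.022 < 0.05  ->  trajToRemove is halted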
def _setupNewStorage(self,traj,keepOpt=False):
"""
Assures correct structure for receiving results from sample evaluations
@ In, traj, int, trajectory of interest
@ In, keepOpt, bool, optional, if True then don't reset the denoised opt
@ Out, None
"""
# store denoised opt if requested
if keepOpt:
den = self.realizations[traj]['denoised']['opt']
denoises = self.gradDict['numIterForAve']
self.realizations[traj] = {'collect' : {'opt' : [ [] ],
'grad': [ [] for _ in range(self.paramDict['pertSingleGrad']) ] },
'denoised': {'opt' : [ [] ],
'grad': [ [] for _ in range(self.paramDict['pertSingleGrad']) ] },
'need' : denoises,
'accepted': 'rejected',
}
# reset opt if requested
if keepOpt:
self.realizations[traj]['denoised']['opt'] = den
self.realizations[traj]['accepted'] = True
def _updateConvergenceVector(self, traj, varsUpdate, currentPoint, conj=False):
"""
Local method to update convergence vector.
@ In, traj, int, identifier of the trajectory to update
@ In, varsUpdate, int, current variables update iteration number
@ In, currentPoint, float, candidate point for optimization path
@ In, conj, bool, optional, identify whether using conjugate gradient to check convergence, if true then do not clear the persistence
@ Out, accepted, bool, True if the candidate point was accepted (improved on the old point), False otherwise
"""
# first, check if we're at varsUpdate 0 (first entry); if so, we are at our first point
if varsUpdate == 0:
# we don't have enough points to decide to accept or reject the new point, so accept it as the initial point
self.raiseADebug('Accepting first point, since we have no rejection criteria.')
return True
## first, determine if we want to keep the new point
# obtain the loss values for comparison
currentLossVal = currentPoint[self.objVar]
oldPoint = self.counter['recentOptHist'][traj][0]
oldLossVal = oldPoint[self.objVar]
# see if new point is better than old point
newerIsBetter = self.checkIfBetter(currentLossVal,oldLossVal)
# if this was a recommended preconditioning point, we should not be converged.
pointFromRecommendation = self.status[traj]['reason'] == 'received recommended point'
# if improved, keep it and move forward; otherwise, reject it and recommend cutting step size
if newerIsBetter:
self.status[traj]['reason'] = 'found new opt point'
self.raiseADebug('Accepting potential opt point for improved loss value. Diff: {}, New: {}, Old: {}'.format(abs(currentLossVal-oldLossVal),currentLossVal,oldLossVal))
else:
self.status[traj]['reason'] = 'rejecting bad opt point'
self.raiseADebug('Rejecting potential opt point for worse loss value. old: "{}", new: "{}"'.format(oldLossVal,currentLossVal))
# cut the next step size to hopefully stay in the valley instead of climb up the other side
self.recommendToGain[traj] = 'cut'
## determine convergence
if pointFromRecommendation:
self.raiseAMessage('Setting convergence for Trajectory "{}" to "False" because of preconditioning.'.format(traj))
converged = False
else:
self.raiseAMessage('Checking convergence for Trajectory "{}":'.format(traj))
self.convergenceProgress[traj] = {} # tracks progress for grad norm, abs, rel tolerances
converged = False # updated for each individual criterion using "or" (pass one, pass all)
#printing utility
printString = ' {:<21}: {:<5}'
printVals = printString + ' (check: {:>+9.2e} < {:>+9.2e}, diff: {:>9.2e})'
# TODO rewrite this action as a lambda?
def printProgress(name,boolCheck,test,gold):
"""
Consolidates a commonly-used print statement to prevent errors and improve readability.
@ In, name, str, printed name of convergence check
@ In, boolCheck, bool, boolean convergence results for this check
@ In, test, float, value of check at current opt point
@ In, gold, float, convergence threshold value
@ Out, None
"""
self.raiseAMessage(printVals.format(name,str(boolCheck),test,gold,abs(test-gold)))
# "min step size" and "gradient norm" are both always valid checks, whether rejecting or accepting new point
## min step size check
try:
lastStep = self.counter['lastStepSize'][traj][0]
minStepSizeCheck = lastStep <= self.minStepSize
except KeyError:
#we reset the step size, so we don't have a value anymore
lastStep = np.nan
minStepSizeCheck = False
printProgress('Min step size',minStepSizeCheck,lastStep,self.minStepSize)
converged = converged or minStepSizeCheck
# if accepting new point, then "same coordinate" and "abs" and "rel" checks are also valid reasons to converge
if newerIsBetter:
#absolute tolerance
absLossDiff = abs(mathUtils.diffWithInfinites(currentLossVal,oldLossVal))
self.convergenceProgress[traj]['abs'] = absLossDiff
absTolCheck = absLossDiff <= self.absConvergenceTol
printProgress('Absolute Loss Diff',absTolCheck,absLossDiff,self.absConvergenceTol)
converged = converged or absTolCheck
#relative tolerance
relLossDiff = mathUtils.relativeDiff(currentLossVal,oldLossVal)
self.convergenceProgress[traj]['rel'] = relLossDiff
relTolCheck = relLossDiff <= self.relConvergenceTol
printProgress('Relative Loss Diff',relTolCheck,relLossDiff,self.relConvergenceTol)
converged = converged or relTolCheck
#same coordinate check
sameCoordinateCheck = True
for var in self.getOptVars():
# don't check constants, of course they're the same
if var in self.constants:
continue
old = oldPoint[var]
current = currentPoint[var]
# differentiate vectors and scalars for checking
if hasattr(old,'__len__'):
if any(old != current):
sameCoordinateCheck = False
break
else:
if old != current:
sameCoordinateCheck = False
break
self.raiseAMessage(printString.format('Same coordinate check',str(sameCoordinateCheck)))
converged = converged or sameCoordinateCheck
if converged:
# update number of successful convergences
self.counter['persistence'][traj] += 1
# check if we've met persistence requirement; if not, keep going
if self.counter['persistence'][traj] >= self.convergencePersistence:
self.raiseAMessage(' ... Trajectory "{}" converged {} times consecutively!'.format(traj,self.counter['persistence'][traj]))
self.convergeTraj[traj] = True
self.removeConvergedTrajectory(traj)
else:
self.raiseAMessage(' ... converged Traj "{}" {} times, required persistence is {}.'.format(traj,self.counter['persistence'][traj],self.convergencePersistence))
else:
if not conj:
self.counter['persistence'][traj] = 0
self.raiseAMessage(' ... continuing trajectory "{}".'.format(traj))
return newerIsBetter
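# Worked example of the loss-difference checks above, with hypothetical
# tolerances absConvergenceTol = 1e-4 and relConvergenceTol = 1e-3, and the
# relative difference taken with respect to the old value for illustration
# (mathUtils additionally guards against infinite losses):
#   old = 10.0, new = 9.9999  ->  abs diff = 1e-4 <= 1e-4 and rel diff = 1e-5 <= 1e-3, so these checks pass
#   old = 10.0, new = 9.0     ->  abs diff = 1.0 and rel diff = 0.1, so neither check passes yet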
def writeToSolutionExport(self,traj, recent, accepted, overwrite=None):
"""
Standardizes how the solution export is written to.
Uses data from "recentOptHist" and other counters to fill in values.
@ In, traj, int, the trajectory for which an entry is being written
@ In, recent, dict, the new optimal point (NORMALIZED) that needs to get written to the solution export
@ In, accepted, string, whether the most recent point was accepted or rejected as a bad move
@ In, overwrite, dict, optional, values to overwrite if requested as {key:val}
@ Out, None
"""
if overwrite is None:
overwrite = {}
# create realization to add to data object
rlz = {}
badValue = -1.0 #value to use if we don't have a value # TODO make this accessible to user?
for var in self.solutionExport.getVars():
# if this variable has indices, add them to the realization
indexes = self.solutionExport.getDimensions(var)[var]
if len(indexes):
# use the prefix to find the right realization
## NOTE there will be a problem with unsynchronized histories!
varUpdate = self.counter['solutionUpdate'][traj]
# negative values wouldn't make sense
varUpdate = max(0,varUpdate-1)
prefix = self._createEvaluationIdentifier(traj, varUpdate, 0)
_,match = self.mdlEvalHist.realization(matchDict = {'prefix':prefix})
for index in indexes:
rlz[index] = match[index]
# CASE: what variable is asked for:
# inputs, objVar, other outputs
if var in overwrite:
new = overwrite[var]
elif var in recent.keys():
new = self.denormalizeData(recent)[var]
elif var in self.constants:
new = self.constants[var]
# custom counters: varsUpdate, trajID, stepSize
elif var == 'varsUpdate':
new = self.counter['solutionUpdate'][traj]
elif var == 'trajID':
new = traj+1 # +1 is for historical reasons, when histories were indexed on 1 instead of 0
elif var == 'stepSize':
try:
new = self.counter['lastStepSize'][traj][0]
except KeyError:
new = badValue
elif var == 'accepted':
new = accepted
elif var.startswith( 'convergenceAbs'):
try:
new = self.convergenceProgress[traj].get('abs',badValue)
except KeyError:
new = badValue
elif var.startswith( 'convergenceRel'):
try:
new = self.convergenceProgress[traj].get('rel',badValue)
except KeyError:
new = badValue
else:
self.raiseAnError(IOError,'Unrecognized output request:',var)
# format for realization
rlz[var] = np.atleast_1d(new)
self.solutionExport.addRealization(rlz)
def checkResampleOption(self,traj):
"""
Report whether resampling should be turned on for this trajectory, based on self.resampleSwitch.
Callers use the returned value to set self.resample[traj] when a candidate point is rejected.
@ In, traj, int, the trajectory being considered for resampling
@ Out, self.resampleSwitch, bool, True if the resample switch is on
"""
return self.resampleSwitch
|
apache-2.0
| 3,588,424,962,406,685,000
| 53.128497
| 240
| 0.646571
| false
| 4.269577
| false
| false
| false
|
marcwagner/alarmclock
|
testset.py
|
1
|
6477
|
import time, datetime
import unittest
import alarm
format = "%a %d-%b-%Y %H:%M"
current_time = datetime.datetime(2015,4,4,16,4,0) #april 4 2015 16:04
default = {'name' : 'default', 'time' : datetime.time(12,0,0), 'days' : ('MO','TU','WE','TH','FR','SA','SU'),
'date' : None, 'path' : './', 'date' : None, 'active' : True}
alarms_list = ({'name' : 'current', 'time' : datetime.time(16,4,0)}, # the current time
{'name' : 'alarm1', 'time' : datetime.time(11,0,0)}, # every day at 11 am
{'name' : 'alarm2', 'time' : datetime.time(9,0,0), 'days' : ('MO','TU')},#monday and tuesday at 9 am
{'name' : 'alarm3', 'time' : datetime.time(22,0,0), 'days' : ('WE','TU','SA')}, # tuesday, wednesday, saturday at 10 pm
{'name' : 'christmas','time' : datetime.time(21,0,0), 'date' : datetime.date(2015,12,24)}, # 9pm on christmas eve
{'name' : 'past', 'time' : datetime.time(12,0,0), 'date' : datetime.date(1999,12,31)}, # noon on dec 31 1999 --> in the past
{'name' : 'path', 'time' : datetime.time(12,0,0), 'path' : '/media/music/1Kindermuziek/K3/K3-Eyo(2011)MP3 Nlt-release/'},
{'name' : 'n_active','time' : datetime.time(12,0,0), 'active' : False},
default)
alarm_times = {'current': datetime.datetime(2015,4,4,16,4,0),
'alarm1': datetime.datetime(2015,4,5,11,0,0),
'alarm2': datetime.datetime(2015,4,6,9,0,0),
'alarm3': datetime.datetime(2015,4,4,22,0,0),
'christmas': datetime.datetime(2015,12,24,21,0,0),
'past': None,
'path': datetime.datetime(2015,4,5,12,0,0),
'n_active':None,
'default': datetime.datetime(2015,4,5,12,0,0)}
root_playlist = ['engeltjes.mp3']
path_playlist = ['01 Eyo.mp3', '02 Hallo K3.mp3', '03 Willem - Alexander.mp3', '04 Smoorverliefd.mp3',
'05 K3 - Airlines.mp3', '06 Beroemd.mp3', '07 Meiden Van De Brandweer.mp3',
'08 Verstoppertje.mp3', '09 Telepathie.mp3', '10 Dubbeldekkertrein.mp3',
'11 Bel Me Ringeling.mp3', '12 Cowboys En Indianen.mp3']
class testcases_alarm(unittest.TestCase):
'''test all cases of working alarms'''
def are_all_the_vars_present(self, alarm, default, **a):
self.assertEqual(a.get('name'), alarm.name)
self.assertEqual(a.get('time'), alarm.time)
self.assertEqual(a.get('date', default['date']), alarm.date)
self.assertEqual(a.get('days', default['days']), alarm.days)
self.assertEqual(a.get('path', default['path']), alarm.path)
self.assertEqual(a.get('active', default['active']), alarm.active)
def test_create_alarm(self):
'''create a basic alarm'''
for a in alarms_list:
al = alarm.alarm(**a)
self.are_all_the_vars_present(al, default, **a)
def test_edit_alarm_correct(self):
'''update an alarm with the parameters of another alarm'''
if len(alarms_list) < 2: # need at least 2 alarms for this test
return
for i in range(len(alarms_list)-1):
a1 = alarms_list[i]
a2 = alarms_list[i+1]
al = alarm.alarm(**a1)
copy_of_default = default.copy()
self.are_all_the_vars_present(al, copy_of_default, **a1)
al.update_alarm(**a2)
copy_of_default.update(a1)
self.are_all_the_vars_present(al, copy_of_default, **a2)
def test_is_the_next_alarm_correct(self):
'''test next_alarm'''
for a in alarms_list:
myalarm = alarm.alarm(**a)
nexttime = alarm_times[myalarm.name]
self.assertEqual(myalarm.next_alarm(current_time), nexttime)
def test_add_alarm_correct_alarms(self):
'''create a set of alarms'''
alarms = alarm.alarmset()
for a in alarms_list:
alarms.add(alarm.alarm(**a))
al = alarms[-1]
self.are_all_the_vars_present(al, default, **a)
self.assertEqual(alarms.exists(a['name']), True)
def test_remove_alarm(self):
'''remove an alarm from a set'''
alarms = alarm.alarmset()
for a in alarms_list:
name = a['name']
alarms.add(alarm.alarm(**a))
self.assertEqual(alarms.exists(name), True)
alarms.remove(alarms[name])
self.assertEqual(alarms.exists(name), False)
def test_the_next_alarm_in_set(self):
'''alarmset next_alarm'''
alarms = alarm.alarmset()
for a in alarms_list:
alarms.add(alarm.alarm(**a))
self.assertEqual(alarms.next_alarm(current_time).next_alarm(current_time), current_time)
def test_generate_playlist(self):
'''based on the path, generate a list of files'''
alarm1 = alarm.alarm(**alarms_list[1])
path = alarm.alarm(**alarms_list[6])
self.assertEqual(alarm1.generate_playlist(), root_playlist)
self.assertEqual(path.generate_playlist(), path_playlist)
def test_play_a_song(self):
'''play a song from file'''
alarm1 = alarm.alarm(**alarms_list[1])
self.assertEqual(alarm1.playing, False)
self.assertEqual(alarm1.blocking, False)
self.assertEqual(alarm1.player_active(), False)
alarm1.play(root_playlist[0])
time.sleep(0.2)
self.assertEqual(alarm1.playing, True)
self.assertEqual(alarm1.blocking, False)
self.assertEqual(alarm1.player_active(), True)
alarm1.stop()
def test_save_and_load_alarms(self):
alarms_1 = alarm.alarmset()
alarms_2 = alarm.alarmset()
for a in alarms_list:
alarms_1.add(alarm.alarm(**a))
alarms_1.save_alarms('test_config.file')
alarms_2.load_alarms('test_config.file')
for a_1, a_2 in zip (alarms_1, alarms_2):
self.assertEqual(a_1.name, a_2.name)
self.assertEqual(a_1.time, a_2.time)
self.assertEqual(a_1.date, a_2.date)
self.assertEqual(a_1.days, a_2.days)
self.assertEqual(a_1.path, a_2.path)
self.assertEqual(a_1.active, a_2.active)
def test_player_active(self):
pass
def test_stop(self):
pass
if __name__ == '__main__':
unittest.main()
|
mit
| 8,184,091,684,865,688,000
| 42.469799
| 142
| 0.561371
| false
| 3.212798
| true
| false
| false
|
timsavage/extopen
|
setup.py
|
1
|
1140
|
from setuptools import setup, find_packages
setup(
name = 'extopen',
version = '0.1.1',
description = "Cross platform helper for opening a file with the default external application.",
long_description = open('README.rst').read(),
url='https://github.com/timsavage/extopen',
author = 'Tim Savage',
author_email = 'tim@savage.company',
license = 'BSD',
platforms = 'Posix; MacOS X; Windows',
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
zip_safe = True,
py_modules = ['extopen']
)
|
bsd-3-clause
| 4,112,152,975,187,760,000
| 33.545455
| 100
| 0.607018
| false
| 4.367816
| false
| false
| false
|
reinaldomaslim/Singaboat_RobotX2016
|
robotx_vision/nodes/color_sequence.py
|
1
|
20932
|
#! /usr/bin/python
""" detect color sequence
ren ye 2016-10-21
reference:
http://stackoverflow.com/questions/14476683/identifying-color-sequence-in-opencv
algorithm:
# image preparation
1. subwindowing to light buoy by laser and camera
2. convert to hsv
3. check hue for the blob
# detection
1. wait until first detection is made
2. wait until no detection is found for 2 seconds
3. record color
4. if color is different from previous frame, add to sequence
5. if no detection, to step 2
6. if sequence is length 3, report and end
"""
#!/usr/bin/env python
""" camshift_color.py - Version 1.1 2013-12-20
Modification of the ROS OpenCV Camshift example using cv_bridge and publishing the ROI
coordinates to the /roi topic.
"""
import time
import rospy
import cv2
from cv2 import cv as cv
from robotx_vision.ros2opencv2 import ROS2OpenCV2
from std_msgs.msg import String, Float64MultiArray, MultiArrayDimension
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Image, RegionOfInterest
import numpy as np
from sklearn.cluster import KMeans
class ColorSequence(ROS2OpenCV2):
# taken from robotx_vision.find_shapes.Color_Detection
x0, y0 = 0, 0
hist_list = list()
MAX_LEN = 7 * 5
counter = 0
roi_x_offset, roi_y_offset, roi_width, roi_height = [0, 0, 0, 0]
def __init__(self, node_name, debug=False):
ROS2OpenCV2.__init__(self, node_name, debug)
self.sequence_pub = rospy.Publisher("color_sequence", Vector3, queue_size=10)
# self.odom_received = False
# rospy.Subscriber("odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
# while not self.odom_received:
# pass
# print "waiting for roi"
rospy.wait_for_message("led_sequence_roi", RegionOfInterest)
rospy.Subscriber("led_sequence_roi", RegionOfInterest, self.roi_callback, queue_size=50)
# print "roi received"
self.node_name = node_name
# The minimum saturation of the tracked color in HSV space,
# as well as the min and max value (the V in HSV) and a
# threshold on the backprojection probability image.
self.smin = rospy.get_param("~smin", 85)
self.vmin = rospy.get_param("~vmin", 50)
self.vmax = rospy.get_param("~vmax", 254)
self.threshold = rospy.get_param("~threshold", 50)
# all done in ros2opencv2.py:
# self.depth_sub, self.depth_callback, self.depth_image
# self.depth_image can be used globally
# self.depth_sub = rospy.Subscriber("input_depth_image", Image, self.depth_callback, queue_size=1)
# Create a number of windows for displaying the histogram,
# parameters controls, and backprojection image
if self.debug:
cv.NamedWindow("Histogram", cv.CV_WINDOW_NORMAL)
cv.MoveWindow("Histogram", 300, 50)
cv.NamedWindow("Parameters", 0)
cv.MoveWindow("Parameters", 700, 50)
cv.NamedWindow("Backproject", 0)
cv.MoveWindow("Backproject", 700, 325)
# cv.NamedWindow("Tracked_obj", 0)
# cv.MoveWindow("Tracked_obj", 700, 900)
# Create the slider controls for saturation, value and threshold
cv.CreateTrackbar("Saturation", "Parameters", self.smin, 255, self.set_smin)
cv.CreateTrackbar("Min Value", "Parameters", self.vmin, 255, self.set_vmin)
cv.CreateTrackbar("Max Value", "Parameters", self.vmax, 255, self.set_vmax)
cv.CreateTrackbar("Threshold", "Parameters", self.threshold, 255, self.set_threshold)
# Initialize a number of variables
self.hist = None
self.track_window = None
self.show_backproj = False
# These are the callbacks for the slider controls
def set_smin(self, pos):
self.smin = pos
def set_vmin(self, pos):
self.vmin = pos
def set_vmax(self, pos):
self.vmax = pos
def set_threshold(self, pos):
self.threshold = pos
# def color_masking(self, frame):
# hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# mask = cv2.inRange(hsv, self.lower_orange, self.upper_orange) + \
# cv2.inRange(hsv, self.lower_yellow, self.upper_yellow)
# return mask
# def depth_masking(self):
# self.depth_array = np.array(self.depth_image, dtype=np.float32)
# # self.depth_image
# depth_mask = np.zeros((self.frame_height, self.frame_width))
# for x in range(self.frame_height):
# for y in range(self.frame_width):
# try:
# # Get a depth value in meters
# z = self.depth_array[y, x]
# # Check for NaN values returned by the camera driver
# if isnan(z):
# continue
# except:
# # It seems to work best if we convert exceptions to big value
# z = 255
# if z < self.depth_threshold:
# depth_mask[y, x] = 255 # white
# else:
# depth_mask[y, x] = 0
# return depth_mask
def find_max_contour(self, mask):
# find contours
contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for multiple contours, find the maximum
area=list()
approx=list()
for i, cnt in enumerate(contours):
approx.append(cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True))
area.append(cv2.contourArea(cnt))
# overwrite selection box by automatic color matching
return cv2.boundingRect(approx[np.argmax(area)])
def find_contours(self, mask):
# find contours
mask = self.morphological(mask)
contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for multiple contours, find the maximum
area=list()
approx=list()
for i, cnt in enumerate(contours):
approx.append(cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True))
area.append(cv2.contourArea(cnt))
# overwrite selection box by automatic color matching
self.area_ratio = np.sum(area) / (self.frame_width * self.frame_height)
if np.max(area) / np.sum(area) > 0.95:
# print "one blob"
self.number_blob = 1
else:
# print "more than one blobs"
self.number_blob = 2
if len(area) > 1: # more than one blob, find the ratio of the 1st and 2nd largest
area_rev_sorted = np.sort(area)[::-1]
self.area_ratio = area_rev_sorted[0] / area_rev_sorted[1]
else: # only one blob found
self.area_ratio = 0
# print self.area_ratio
def morphological(self, mask):
""" tune the mask """
# morphological opening (remove small objects from the foreground)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# # morphological closing (fill small holes in the foreground)
kernel = np.ones((10, 10), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
return mask
# The main processing function computes the histogram and backprojection
def process_image(self, cv_image):
try:
# First blur the image
frame = cv2.blur(cv_image, (5, 5))
# Convert from RGB to HSV space
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Create a mask using the current saturation and value parameters
mask = cv2.inRange(hsv, np.array((0., self.smin, self.vmin)), np.array((180., 255., self.vmax)))
# not select any region, do automatic color rectangle
if self.selection is None:
# obtain the color mask
# edge_roi = self.edge_masking()
# print "edge mask", edge_mask
# create bounding box from the maximum mask
self.selection = [self.roi_x_offset, self.roi_y_offset, self.roi_width, self.roi_height] # in x y w h
# print "selection", self.selection
self.detect_box = self.selection
self.track_box = None
# If the user is making a selection with the mouse,
# calculate a new histogram to track
if self.selection is not None:
x0, y0, w, h = self.selection
x1 = x0 + w
y1 = y0 + h
self.track_window = (x0, y0, x1, y1)
hsv_roi = hsv[y0:y1, x0:x1]
mask_roi = mask[y0:y1, x0:x1]
self.hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
cv2.normalize(self.hist, self.hist, 0, 255, cv2.NORM_MINMAX)
self.hist = self.hist.reshape(-1)
self.hist_prob = np.argmax(self.hist)
# print self.hist_prob
self.show_hist()
if self.detect_box is not None:
self.selection = None
# If we have a histogram, track it with CamShift
# if self.hist is not None:
# # Compute the backprojection from the histogram
# backproject = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
# # Mask the backprojection with the mask created earlier
# backproject &= mask
# # Threshold the backprojection
# ret, backproject = cv2.threshold(backproject, self.threshold, 255, cv.CV_THRESH_TOZERO)
# # self.find_contours(backproject)
# # Detect blobs.
# # keypoints = self.blob_detector.detect(backproject)
# # print keypoints
# x, y, w, h = self.track_window
# if self.track_window is None or w <= 0 or h <=0:
# self.track_window = 0, 0, self.frame_width - 1, self.frame_height - 1
# # Set the criteria for the CamShift algorithm
# term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
# # Run the CamShift algorithm
# self.track_box, self.track_window = cv2.CamShift(backproject, self.track_window, term_crit)
# x0, y0, x1, y1 = self.track_window
# # print self.track_window
# # Display the resulting backprojection
# cv2.imshow("Backproject", backproject)
except:
pass
return cv_image
def show_hist(self):
bin_count = self.hist.shape[0]
bin_w = 24
img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
# print np.argmax(self.hist)
self.hist_prob = np.argmax(self.hist)
for i in xrange(bin_count):
h = int(self.hist[i])
cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
if self.debug:
cv2.imshow('Histogram', img)
def hue_histogram_as_image(self, hist):
""" Returns a nice representation of a hue histogram """
histimg_hsv = cv.CreateImage((320, 200), 8, 3)
mybins = cv.CloneMatND(hist.bins)
cv.Log(mybins, mybins)
(_, hi, _, _) = cv.MinMaxLoc(mybins)
cv.ConvertScale(mybins, mybins, 255. / hi)
w,h = cv.GetSize(histimg_hsv)
hdims = cv.GetDims(mybins)[0]
for x in range(w):
xh = (180 * x) / (w - 1) # hue sweeps from 0-180 across the image
val = int(mybins[int(hdims * x / w)] * h / 255)
cv2.rectangle(histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
cv2.rectangle(histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)
histimg = cv2.cvtColor(histimg_hsv, cv.CV_HSV2BGR)
return histimg
def odom_callback(self, msg):
""" call back to subscribe, get odometry data:
pose and orientation of the current boat,
suffix 0 is for origin """
self.x0 = msg.pose.pose.position.x
self.y0 = msg.pose.pose.position.y
self.odom_received = True
def image_callback(self, data):
# Store the image header in a global variable
self.image_header = data.header
# Time this loop to get cycles per second
start = time.time()
# Convert the ROS image to OpenCV format using a cv_bridge helper function
frame = self.convert_image(data)
# Some webcams invert the image
if self.flip_image:
frame = cv2.flip(frame, 0)
# Store the frame width and height in a pair of global variables
if self.frame_width is None:
self.frame_size = (frame.shape[1], frame.shape[0])
self.frame_width, self.frame_height = self.frame_size
# Create the marker image we will use for display purposes
if self.marker_image is None:
self.marker_image = np.zeros_like(frame)
# Copy the current frame to the global image in case we need it elsewhere
self.frame = frame.copy()
# Reset the marker image if we're not displaying the history
if not self.keep_marker_history:
self.marker_image = np.zeros_like(self.marker_image)
# Process the image to detect and track objects or features
processed_image = self.process_image(frame)
# If the result is a greyscale image, convert to 3-channel for display purposes """
#if processed_image.channels == 1:
#cv.CvtColor(processed_image, self.processed_image, cv.CV_GRAY2BGR)
#else:
# Make a global copy
self.processed_image = processed_image.copy()
# Display the user-selection rectangle or point
self.display_selection()
# Night mode: only display the markers
if self.night_mode:
self.processed_image = np.zeros_like(self.processed_image)
# Merge the processed image and the marker image
self.display_image = cv2.bitwise_or(self.processed_image, self.marker_image)
# If we have a track box, then display it. The track box can be either a regular
# cvRect (x,y,w,h) or a rotated Rect (center, size, angle).
if self.show_boxes:
if self.track_box is not None and self.is_rect_nonzero(self.track_box):
if len(self.track_box) == 4:
x,y,w,h = self.track_box
size = (w, h)
center = (x + w / 2, y + h / 2)
angle = 0
self.track_box = (center, size, angle)
else:
(center, size, angle) = self.track_box
# For face tracking, an upright rectangle looks best
if self.face_tracking:
pt1 = (int(center[0] - size[0] / 2), int(center[1] - size[1] / 2))
pt2 = (int(center[0] + size[0] / 2), int(center[1] + size[1] / 2))
cv2.rectangle(self.display_image, pt1, pt2, cv.RGB(50, 255, 50), self.feature_size, 8, 0)
else:
# Otherwise, display a rotated rectangle
vertices = np.int0(cv2.cv.BoxPoints(self.track_box))
cv2.drawContours(self.display_image, [vertices], 0, cv.RGB(50, 255, 50), self.feature_size)
# If we don't yet have a track box, display the detect box if present
elif self.detect_box is not None and self.is_rect_nonzero(self.detect_box):
(pt1_x, pt1_y, w, h) = self.detect_box
if self.show_boxes:
cv2.rectangle(self.display_image, (pt1_x, pt1_y), (pt1_x + w, pt1_y + h), cv.RGB(50, 255, 50), self.feature_size, 8, 0)
# Publish the ROI
self.publish_roi()
self.publish_sequence()
# Compute the time for this loop and estimate CPS as a running average
end = time.time()
duration = end - start
fps = int(1.0 / duration)
self.cps_values.append(fps)
if len(self.cps_values) > self.cps_n_values:
self.cps_values.pop(0)
self.cps = int(sum(self.cps_values) / len(self.cps_values))
# Display CPS and image resolution if asked to
if self.show_text:
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
""" Print cycles per second (CPS) and resolution (RES) at top of the image """
if self.frame_size[0] >= 640:
vstart = 25
voffset = int(50 + self.frame_size[1] / 120.)
elif self.frame_size[0] == 320:
vstart = 15
voffset = int(35 + self.frame_size[1] / 120.)
else:
vstart = 10
voffset = int(20 + self.frame_size[1] / 120.)
cv2.putText(self.display_image, "CPS: " + str(self.cps), (10, vstart), font_face, font_scale, cv.RGB(255, 255, 0))
cv2.putText(self.display_image, "RES: " + str(self.frame_size[0]) + "X" + str(self.frame_size[1]), (10, voffset), font_face, font_scale, cv.RGB(255, 255, 0))
if self.debug:
# Update the image display
cv2.imshow(self.node_name, self.display_image)
# Process any keyboard commands
self.keystroke = cv2.waitKey(5)
if self.keystroke is not None and self.keystroke != -1:
try:
cc = chr(self.keystroke & 255).lower()
if cc == 'n':
self.night_mode = not self.night_mode
elif cc == 'f':
self.show_features = not self.show_features
elif cc == 'b':
self.show_boxes = not self.show_boxes
elif cc == 't':
self.show_text = not self.show_text
elif cc == 'q':
# The user has pressed the q key, so exit
rospy.signal_shutdown("User hit q key to quit.")
except:
pass
def publish_sequence(self):
# Watch out for negative offsets
# pass
# append all data to hist_list
if len(self.hist_list) > self.MAX_LEN:
self.hist_list.pop(0)
try:
self.hist_list.append([self.counter, self.hist_prob])
except:
pass
# print self.hist_list
self.counter += 1
# find distinct hist_prob
try:
kmeans = KMeans(n_clusters=3)
kmeans.fit(np.array(self.hist_list))
color_sequence = kmeans.cluster_centers_
order = np.argsort(color_sequence[:,0])[::-1]
ordered_sequence = color_sequence[order,1]
# print "ordered seq", ordered_sequence
color_seq = ["", "", ""]
c = 0
for i in ordered_sequence:
print i
if i< 1 or i > 14:
color_seq[c] = "red"
elif 7 < i < 12:
color_seq[c] = "blue"
elif 1 < i < 4:
color_seq[c] = "yellow"
elif 3 < i < 7:
color_seq[c] = "green"
c += 1
print "color_seq", color_seq
a = Vector3()
a.x = ordered_sequence[0]
a.y = ordered_sequence[1]
a.z = ordered_sequence[2]
self.sequence_pub.publish(a)
rospy.set_param("/gui/color1", color_seq[0])
rospy.set_param("/gui/color2", color_seq[1])
rospy.set_param("/gui/color3", color_seq[2])
except:
print "sequence publish failed"
def roi_callback(self, msg):
# print msg.x_offset
self.roi_x_offset = msg.x_offset
self.roi_y_offset = msg.y_offset
self.roi_width = msg.width
self.roi_height = msg.height
# try:
# sequence = Vector3()
# sequence.data.x = self.x0
# sequence.data.y = self.y0
# sequence.data.z = self.hist_prob
# print sequence.data
# self.sequence_pub.publish(sequence)
# except:
# rospy.loginfo("Publishing sequence failed")
if __name__ == '__main__':
try:
node_name = "color_sequence"
ColorSequence(node_name, debug=True)
try:
rospy.init_node(node_name)
except:
pass
rospy.spin()
except KeyboardInterrupt:
print "Shutting down vision node."
cv.DestroyAllWindows()
|
gpl-3.0
| 2,657,304,766,669,864,400
| 37.691312
| 169
| 0.559431
| false
| 3.640348
| false
| false
| false
|
albertobeta/UberSimpleWebsockets
|
send.py
|
1
|
1219
|
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
from tornado.ioloop import PeriodicCallback
import tornado.web
from random import randint #Random generator
#Config
port = 9000 #Websocket Port
timeInterval= 2000 #Milliseconds
class WSHandler(tornado.websocket.WebSocketHandler):
#check_origin fixes an error 403 with Tornado
#http://stackoverflow.com/questions/24851207/tornado-403-get-warning-when-opening-websocket
def check_origin(self, origin):
return True
def open(self):
#Send message periodic via socket upon a time interval
self.callback = PeriodicCallback(self.send_values, timeInterval)
self.callback.start()
def send_values(self):
#Generates random values to send via websocket
self.write_message(str(randint(1,10)) + ';' + str(randint(1,10)) + ';' + str(randint(1,10)) + ';' + str(randint(1,10)))
def on_message(self, message):
pass
def on_close(self):
self.callback.stop()
application = tornado.web.Application([
(r'/', WSHandler),
])
if __name__ == "__main__":
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
tornado.ioloop.IOLoop.instance().start()
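# A minimal client sketch for the handler above (illustrative only; it assumes
# the server is running locally on the port configured at the top of this file):
#
#   from tornado import gen, ioloop, websocket
#
#   @gen.coroutine
#   def listen():
#       conn = yield websocket.websocket_connect("ws://localhost:9000/")
#       while True:
#           msg = yield conn.read_message()
#           if msg is None:  # connection closed
#               break
#           print msg.split(';')  # four random values per message
#
#   # ioloop.IOLoop.instance().run_sync(listen)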
|
mit
| -5,836,817,919,319,675,000
| 29.5
| 127
| 0.710418
| false
| 3.716463
| false
| false
| false
|
vojtechtrefny/anaconda
|
pyanaconda/anaconda.py
|
1
|
8728
|
# anaconda: The Red Hat Linux Installation program
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brent Fox <bfox@redhat.com>
# Mike Fulbright <msf@redhat.com>
# Jakub Jelinek <jakub@redhat.com>
# Jeremy Katz <katzj@redhat.com>
# Chris Lumens <clumens@redhat.com>
# Paul Nasrat <pnasrat@redhat.com>
# Erik Troan <ewt@rpath.com>
# Matt Wilson <msw@rpath.com>
#
import os
import sys
import stat
from glob import glob
from tempfile import mkstemp
import threading
from pyanaconda.bootloader import get_bootloader
from pyanaconda import constants
from pyanaconda import iutil
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
def __init__(self):
from pyanaconda import desktop
self._bootloader = None
self.canReIPL = False
self.desktop = desktop.Desktop()
self.dir = None
self.displayMode = None
self.id = None
self._instClass = None
self._intf = None
self.isHeadless = False
self.ksdata = None
self.mediaDevice = None
self.methodstr = None
self.opts = None
self._payload = None
self.proxy = None
self.proxyUsername = None
self.proxyPassword = None
self.reIPLMessage = None
self.rescue_mount = True
self.rootParts = None
self.stage2 = None
self._storage = None
self.updateSrc = None
self.mehConfig = None
# *sigh* we still need to be able to write this out
self.xdriver = None
# Data for inhibiting the screensaver
self.dbus_session_connection = None
self.dbus_inhibit_id = None
# This is used to synchronize Gtk.main calls between the graphical
# interface and error dialogs. Whoever gets to their initialization code
# first will lock gui_initializing
self.gui_initialized = threading.Lock()
@property
def bootloader(self):
if not self._bootloader:
self._bootloader = get_bootloader()
return self._bootloader
@property
def instClass(self):
if not self._instClass:
from pyanaconda.installclass import DefaultInstall
self._instClass = DefaultInstall()
return self._instClass
def _getInterface(self):
return self._intf
def _setInterface(self, v):
# "lambda cannot contain assignment"
self._intf = v
def _delInterface(self):
del self._intf
intf = property(_getInterface, _setInterface, _delInterface)
@property
def payload(self):
# Try to find the packaging payload class. First try the install
# class. If it doesn't give us one, fall back to the default.
if not self._payload:
klass = self.instClass.getBackend()
if not klass:
from pyanaconda.flags import flags
if self.ksdata.ostreesetup.seen:
from pyanaconda.packaging.rpmostreepayload import RPMOSTreePayload
klass = RPMOSTreePayload
elif flags.livecdInstall:
from pyanaconda.packaging.livepayload import LiveImagePayload
klass = LiveImagePayload
elif self.ksdata.method.method == "liveimg":
from pyanaconda.packaging.livepayload import LiveImageKSPayload
klass = LiveImageKSPayload
else:
from pyanaconda.packaging.dnfpayload import DNFPayload
klass = DNFPayload
self._payload = klass(self.ksdata)
return self._payload
@property
def protected(self):
specs = []
if os.path.exists("/run/initramfs/livedev") and \
stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
specs.append(os.readlink("/run/initramfs/livedev"))
if self.methodstr and self.methodstr.startswith("hd:"):
specs.append(self.methodstr[3:].split(":", 3)[0])
if self.stage2 and self.stage2.startswith("hd:"):
specs.append(self.stage2[3:].split(":", 3)[0])
# zRAM swap devices need to be protected
for zram_dev in glob("/dev/zram*"):
specs.append(zram_dev)
return specs
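# Illustrative result (hypothetical devices): on a live install booted from
# /dev/sda1, with a "hd:sdb1:..." install source and one zram swap device,
# this property might return ['/dev/sda1', 'sdb1', '/dev/zram0'].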
@property
def storage(self):
if not self._storage:
import blivet
self._storage = blivet.Blivet(ksdata=self.ksdata)
if self.instClass.defaultFS:
self._storage.setDefaultFSType(self.instClass.defaultFS)
return self._storage
def dumpState(self):
from meh import ExceptionInfo
from meh.dump import ReverseExceptionDump
from inspect import stack as _stack
from traceback import format_stack
# Skip the frames for dumpState and the signal handler.
stack = _stack()[2:]
stack.reverse()
exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
self.mehConfig)
# gather up info on the running threads
threads = "\nThreads\n-------\n"
for thread_id, frame in sys._current_frames().items():
threads += "\nThread %s\n" % (thread_id,)
threads += "".join(format_stack(frame))
# dump to a unique file
(fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
dump_text = exn.traceback_and_object_dump(self)
dump_text += threads
dump_text = dump_text.encode("utf-8")
iutil.eintr_retry_call(os.write, fd, dump_text)
iutil.eintr_retry_call(os.close, fd)
# append to a given file
with open("/tmp/anaconda-tb-all.log", "a+") as f:
f.write("--- traceback: %s ---\n" % filename)
f.write(dump_text + "\n")
def initInterface(self, addon_paths=None):
if self._intf:
raise RuntimeError("Second attempt to initialize the InstallInterface")
if self.displayMode == 'g':
from pyanaconda.ui.gui import GraphicalUserInterface
# Run the GUI in non-fullscreen mode, so live installs can still
# use the window manager
self._intf = GraphicalUserInterface(self.storage, self.payload,
self.instClass, gui_lock=self.gui_initialized,
fullscreen=False)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="gui")
elif self.displayMode in ['t', 'c']: # text and command line are the same
from pyanaconda.ui.tui import TextUserInterface
self._intf = TextUserInterface(self.storage, self.payload,
self.instClass)
# needs to be refreshed now we know if gui or tui will take place
addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
ui_subdir="tui")
else:
raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)
if addon_paths:
self._intf.update_paths(addon_paths)
def writeXdriver(self, root=None):
# this should go away at some point, but until it does, we
# need to keep it around.
if self.xdriver is None:
return
if root is None:
root = iutil.getSysroot()
if not os.path.isdir("%s/etc/X11" %(root,)):
os.makedirs("%s/etc/X11" %(root,), mode=0o755)
f = open("%s/etc/X11/xorg.conf" %(root,), 'w')
f.write('Section "Device"\n\tIdentifier "Videocard0"\n\tDriver "%s"\nEndSection\n' % self.xdriver)
f.close()
|
gpl-2.0
| 8,755,333,330,063,547,000
| 35.066116
| 106
| 0.601169
| false
| 4.074697
| false
| false
| false
|
PolicyStat/docx2html
|
docx2html/core.py
|
1
|
48167
|
import cgi
import logging
import os
import os.path
import re
from PIL import Image
from lxml import etree
from lxml.etree import XMLSyntaxError
from collections import namedtuple, defaultdict
from zipfile import ZipFile, BadZipfile
from docx2html.exceptions import (
ConversionFailed,
FileNotDocx,
MalformedDocx,
UnintendedTag,
SyntaxNotSupported,
)
DETECT_FONT_SIZE = False
EMUS_PER_PIXEL = 9525
NSMAP = {}
IMAGE_EXTENSIONS_TO_SKIP = ['emf', 'wmf', 'svg']
DEFAULT_LIST_NUMBERING_STYLE = 'decimal'
logger = logging.getLogger(__name__)
###
# Help functions
###
def replace_ext(file_path, new_ext):
"""
>>> replace_ext('one/two/three.four.doc', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', '.html')
'one/two/three.four.html'
>>> replace_ext('one/two/three.four.DOC', 'html')
'one/two/three.four.html'
"""
if not new_ext.startswith(os.extsep):
new_ext = os.extsep + new_ext
index = file_path.rfind(os.extsep)
return file_path[:index] + new_ext
def ensure_tag(tags):
# For some functions we can short-circuit and early exit if the tag is not
# the right kind.
def wrapped(f):
def wrap(*args, **kwargs):
passed_in_tag = args[0]
if passed_in_tag is None:
return None
w_namespace = get_namespace(passed_in_tag, 'w')
valid_tags = [
'%s%s' % (w_namespace, t) for t in tags
]
if passed_in_tag.tag in valid_tags:
return f(*args, **kwargs)
return None
return wrap
return wrapped
def get_namespace(el, namespace):
if namespace not in NSMAP:
NSMAP[namespace] = '{%s}' % el.nsmap[namespace]
return NSMAP[namespace]
def convert_image(target, image_size):
_, extension = os.path.splitext(os.path.basename(target))
# If the image size has a zero in it, return early
if image_size and not all(image_size):
return target
# These image types need to be converted to gif.
invalid_extensions = (
'.bmp',
'.dib',
'.tiff',
'.tif',
)
# Open the image and get the format.
try:
image = Image.open(target)
except IOError:
return target
image_format = image.format
image_file_name = target
# Make sure the size of the image and the size of the embedded image are
# the same.
if image_size is not None and image.size != image_size:
# Resize if needed
try:
image = image.resize(image_size, Image.ANTIALIAS)
except IOError:
pass
# If we have an invalid extension, change the format to gif.
if extension.lower() in invalid_extensions:
image_format = 'GIF'
image_file_name = replace_ext(target, '.gif')
# Resave the image (Post resizing) with the correct format
try:
image.save(image_file_name, image_format)
except IOError:
return target
return image_file_name
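# Illustrative usage of convert_image (hypothetical path and size): a .tiff
# extracted from the docx is resized to the embedded dimensions and re-saved as
# a gif; on any failure the original path is returned unchanged.
#   new_path = convert_image('/tmp/extracted/image1.tiff', (200, 100))
#   # -> '/tmp/extracted/image1.gif' if the conversion succeeded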
@ensure_tag(['p'])
def get_font_size(p, styles_dict):
w_namespace = get_namespace(p, 'w')
r = p.find('%sr' % w_namespace)
if r is None:
return None
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return None
size = rpr.find('%ssz' % w_namespace)
if size is None:
# Need to get the font size off the styleId
pPr = p.find('%spPr' % w_namespace)
if pPr is None:
return None
pStyle = pPr.find('%spStyle' % w_namespace)
if pStyle is None:
return None
pStyle = pStyle.get('%sval' % w_namespace)
font_size = None
style_value = styles_dict.get(pStyle, None)
if style_value is None:
return None
if 'font_size' in style_value:
font_size = styles_dict[pStyle]['font_size']
while font_size is None:
old_pStyle = pStyle
# If pStyle is not in the styles_dict then we have to break.
if pStyle not in styles_dict:
break
# If based on is not in the styles_dict for pStyle then we have to
# break.
if 'based_on' not in styles_dict[pStyle]:
break
# Try to derive what the font size is based on what the current
# style is based on.
pStyle = styles_dict[pStyle]['based_on']
if old_pStyle == pStyle:
break
# If pStyle is not in styles_dict then break.
if pStyle not in styles_dict:
break
# We have found a new font size
font_size = styles_dict[pStyle]['font_size']
return font_size
return size.get('%sval' % w_namespace)
@ensure_tag(['p'])
def is_natural_header(el, styles_dict):
w_namespace = get_namespace(el, 'w')
pPr = el.find('%spPr' % w_namespace)
if pPr is None:
return False
pStyle = pPr.find('%spStyle' % w_namespace)
if pStyle is None:
return False
style_id = pStyle.get('%sval' % w_namespace)
if (
style_id in styles_dict and
'header' in styles_dict[style_id] and
styles_dict[style_id]['header']):
return styles_dict[style_id]['header']
@ensure_tag(['p'])
def is_header(el, meta_data):
if _is_top_level_upper_roman(el, meta_data):
return 'h2'
el_is_natural_header = is_natural_header(el, meta_data.styles_dict)
if el_is_natural_header:
return el_is_natural_header
if _is_li(el):
return False
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
return False
# Check to see if this is a header because the font size is different than
# the normal font size.
    # Since get_font_size is also used before the meta data is created, just
    # pass in styles_dict.
if DETECT_FONT_SIZE:
font_size = get_font_size(el, meta_data.styles_dict)
if font_size is not None:
if meta_data.font_sizes_dict[font_size]:
return meta_data.font_sizes_dict[font_size]
# If a paragraph is longer than eight words it is likely not supposed to be
# an h tag.
num_words = len(
etree.tostring(
el,
encoding=unicode,
method='text',
).split(' ')
)
if num_words > 8:
return False
# Check to see if the full line is bold.
whole_line_bold, whole_line_italics = whole_line_styled(el)
if whole_line_bold or whole_line_italics:
return 'h2'
return False
@ensure_tag(['p'])
def _is_top_level_upper_roman(el, meta_data):
w_namespace = get_namespace(el, 'w')
ilvl = get_ilvl(el, w_namespace)
# If this list is not in the root document (indentation of 0), then it
# cannot be a top level upper roman list.
if ilvl != 0:
return False
numId = get_numId(el, w_namespace)
list_type = meta_data.numbering_dict[numId].get(ilvl, False)
return list_type == 'upperRoman'
@ensure_tag(['p'])
def _is_li(el):
return len(el.xpath('.//w:numPr/w:ilvl', namespaces=el.nsmap)) != 0
@ensure_tag(['p'])
def is_li(el, meta_data):
"""
The only real distinction between an ``li`` tag and a ``p`` tag is that an
``li`` tag has an attribute called numPr which holds the list id and ilvl
(indentation level)
"""
if is_header(el, meta_data):
return False
return _is_li(el)
def has_text(p):
"""
It is possible for a ``p`` tag in document.xml to not have any content. If
this is the case we do not want that tag interfering with things like
lists. Detect if this tag has any content.
"""
return '' != etree.tostring(p, encoding=unicode, method='text').strip()
def is_last_li(li, meta_data, current_numId):
"""
Determine if ``li`` is the last list item for a given list
"""
if not is_li(li, meta_data):
return False
w_namespace = get_namespace(li, 'w')
next_el = li
while True:
        # If we run out of elements this must be the last list item
if next_el is None:
return True
next_el = next_el.getnext()
# Ignore elements that are not a list item
if not is_li(next_el, meta_data):
continue
new_numId = get_numId(next_el, w_namespace)
if current_numId != new_numId:
return True
# If we have gotten here then we have found another list item in the
# current list, so ``li`` is not the last li in the list.
return False
@ensure_tag(['p'])
def get_single_list_nodes_data(li, meta_data):
"""
    Find consecutive li tags that have content and share the same list id.
"""
yield li
w_namespace = get_namespace(li, 'w')
current_numId = get_numId(li, w_namespace)
starting_ilvl = get_ilvl(li, w_namespace)
el = li
while True:
el = el.getnext()
if el is None:
break
# If the tag has no content ignore it.
if not has_text(el):
continue
# Stop the lists if you come across a list item that should be a
# heading.
if _is_top_level_upper_roman(el, meta_data):
break
if (
is_li(el, meta_data) and
(starting_ilvl > get_ilvl(el, w_namespace))):
break
new_numId = get_numId(el, w_namespace)
if new_numId is None or new_numId == -1:
# Not a p tag or a list item
yield el
continue
        # If the list id of the next tag is different than the previous one,
        # that means a new list is being made (not nested)
if current_numId != new_numId:
# Not a subsequent list.
break
if is_last_li(el, meta_data, current_numId):
yield el
break
yield el
@ensure_tag(['p'])
def get_ilvl(li, w_namespace):
"""
    The ilvl on an li tag gives the indentation level of that tag. This is
    used to determine if the li tag needs to be nested or not.
"""
ilvls = li.xpath('.//w:ilvl', namespaces=li.nsmap)
if len(ilvls) == 0:
return -1
return int(ilvls[0].get('%sval' % w_namespace))
@ensure_tag(['p'])
def get_numId(li, w_namespace):
"""
    The numId on an li tag maps to the numbering dictionary alongside the ilvl
to determine what the list should look like (unordered, digits, lower
alpha, etc)
"""
numIds = li.xpath('.//w:numId', namespaces=li.nsmap)
if len(numIds) == 0:
return -1
return numIds[0].get('%sval' % w_namespace)
def create_list(list_type):
"""
    Based on the passed in list_type, create a list object (ol/ul). In the
future this function will also deal with what the numbering of an ordered
list should look like.
"""
list_types = {
'bullet': 'ul',
}
el = etree.Element(list_types.get(list_type, 'ol'))
# These are the supported list style types and their conversion to css.
list_type_conversions = {
'decimal': DEFAULT_LIST_NUMBERING_STYLE,
'decimalZero': 'decimal-leading-zero',
'upperRoman': 'upper-roman',
'lowerRoman': 'lower-roman',
'upperLetter': 'upper-alpha',
'lowerLetter': 'lower-alpha',
'ordinal': DEFAULT_LIST_NUMBERING_STYLE,
'cardinalText': DEFAULT_LIST_NUMBERING_STYLE,
'ordinalText': DEFAULT_LIST_NUMBERING_STYLE,
}
if list_type != 'bullet':
el.set(
'data-list-type',
list_type_conversions.get(list_type, DEFAULT_LIST_NUMBERING_STYLE),
)
return el
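# Illustrative sketch (added for clarity, not part of the original module; it relies on
# the module's existing lxml ``etree`` import): an 'upperRoman' list becomes an <ol>
# carrying its css list style as a data attribute.
def _create_list_example():
    el = create_list('upperRoman')
    return etree.tostring(el)  # roughly '<ol data-list-type="upper-roman"/>'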
@ensure_tag(['tc'])
def get_v_merge(tc):
"""
vMerge is what docx uses to denote that a table cell is part of a rowspan.
The first cell to have a vMerge is the start of the rowspan, and the vMerge
will be denoted with 'restart'. If it is anything other than restart then
it is a continuation of another rowspan.
"""
if tc is None:
return None
v_merges = tc.xpath('.//w:vMerge', namespaces=tc.nsmap)
if len(v_merges) != 1:
return None
v_merge = v_merges[0]
return v_merge
@ensure_tag(['tc'])
def get_grid_span(tc):
"""
gridSpan is what docx uses to denote that a table cell has a colspan. This
is much more simple than rowspans in that there is a one-to-one mapping
from gridSpan to colspan.
"""
w_namespace = get_namespace(tc, 'w')
grid_spans = tc.xpath('.//w:gridSpan', namespaces=tc.nsmap)
if len(grid_spans) != 1:
return 1
grid_span = grid_spans[0]
return int(grid_span.get('%sval' % w_namespace))
@ensure_tag(['tr'])
def get_td_at_index(tr, index):
"""
When calculating the rowspan for a given cell it is required to find all
table cells 'below' the initial cell with a v_merge. This function will
return the td element at the passed in index, taking into account colspans.
"""
current = 0
for td in tr.xpath('.//w:tc', namespaces=tr.nsmap):
if index == current:
return td
current += get_grid_span(td)
@ensure_tag(['tbl'])
def get_rowspan_data(table):
w_namespace = get_namespace(table, 'w')
# We need to keep track of what table row we are on as well as which table
# cell we are on.
tr_index = 0
td_index = 0
# Get a list of all the table rows.
tr_rows = list(table.xpath('.//w:tr', namespaces=table.nsmap))
# Loop through each table row.
for tr in table.xpath('.//w:tr', namespaces=table.nsmap):
# Loop through each table cell.
for td in tr.xpath('.//w:tc', namespaces=tr.nsmap):
# Check to see if this cell has a v_merge
v_merge = get_v_merge(td)
# If not increment the td_index and move on
if v_merge is None:
td_index += get_grid_span(td)
continue
# If it does have a v_merge we need to see if it is the ``root``
# table cell (the first in a row to have a rowspan)
# If the value is restart then this is the table cell that needs
# the rowspan.
if v_merge.get('%sval' % w_namespace) == 'restart':
row_span = 1
# Loop through each table row after the current one.
for tr_el in tr_rows[tr_index + 1:]:
# Get the table cell at the current td_index.
td_el = get_td_at_index(tr_el, td_index)
td_el_v_merge = get_v_merge(td_el)
                    # If the td_el does not have a v_merge then the rowspan is
# done.
if td_el_v_merge is None:
break
val = td_el_v_merge.get('%sval' % w_namespace)
# If the v_merge is restart then there is another cell that
                    # needs a rowspan, so the current cell's rowspan is done.
if val == 'restart':
break
# Increment the row_span
row_span += 1
yield row_span
# Increment the indexes.
td_index += get_grid_span(td)
tr_index += 1
# Reset the td_index when we finish each table row.
td_index = 0
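# Illustrative sketch (added for clarity, not part of the original module; it relies on
# the module's existing lxml ``etree`` import and on the standard WordprocessingML main
# namespace URI): a vMerge 'restart' cell followed by one continuation cell should yield
# a rowspan of 2.
def _rowspan_example():
    xml = (
        '<w:tbl xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
        '<w:tr><w:tc><w:tcPr><w:vMerge w:val="restart"/></w:tcPr></w:tc></w:tr>'
        '<w:tr><w:tc><w:tcPr><w:vMerge/></w:tcPr></w:tc></w:tr>'
        '</w:tbl>'
    )
    return list(get_rowspan_data(etree.fromstring(xml)))  # expected: [2]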
@ensure_tag(['b', 'i', 'u'])
def style_is_false(style):
"""
For bold, italics and underline. Simply checking to see if the various tags
are present will not suffice. If the tag is present and set to False then
the style should not be present.
"""
if style is None:
return False
w_namespace = get_namespace(style, 'w')
return style.get('%sval' % w_namespace) != 'false'
@ensure_tag(['r'])
def is_bold(r):
"""
The function will return True if the r tag passed in is considered bold.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
bold = rpr.find('%sb' % w_namespace)
return style_is_false(bold)
@ensure_tag(['r'])
def is_italics(r):
"""
The function will return True if the r tag passed in is considered
italicized.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
italics = rpr.find('%si' % w_namespace)
return style_is_false(italics)
@ensure_tag(['r'])
def is_underlined(r):
"""
The function will return True if the r tag passed in is considered
underlined.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
underline = rpr.find('%su' % w_namespace)
return style_is_false(underline)
@ensure_tag(['p'])
def is_title(p):
"""
Certain p tags are denoted as ``Title`` tags. This function will return
True if the passed in p tag is considered a title.
"""
w_namespace = get_namespace(p, 'w')
styles = p.xpath('.//w:pStyle', namespaces=p.nsmap)
if len(styles) == 0:
return False
style = styles[0]
return style.get('%sval' % w_namespace) == 'Title'
@ensure_tag(['r'])
def get_text_run_content_data(r):
"""
It turns out that r tags can contain both t tags and drawing tags. Since we
need both, this function will return them in the order in which they are
found.
"""
w_namespace = get_namespace(r, 'w')
valid_elements = (
'%st' % w_namespace,
'%sdrawing' % w_namespace,
'%spict' % w_namespace,
'%sbr' % w_namespace,
)
for el in r:
if el.tag in valid_elements:
yield el
@ensure_tag(['drawing', 'pict'])
def get_image_id(drawing):
r_namespace = get_namespace(drawing, 'r')
for el in drawing.iter():
# For drawing
image_id = el.get('%sembed' % r_namespace)
if image_id is not None:
return image_id
# For pict
if 'v' not in el.nsmap:
continue
v_namespace = get_namespace(drawing, 'v')
if el.tag == '%simagedata' % v_namespace:
image_id = el.get('%sid' % r_namespace)
if image_id is not None:
return image_id
@ensure_tag(['p'])
def whole_line_styled(p):
"""
Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
line is italics, False otherwise.
"""
r_tags = p.xpath('.//w:r', namespaces=p.nsmap)
tags_are_bold = [
is_bold(r) or is_underlined(r) for r in r_tags
]
tags_are_italics = [
is_italics(r) for r in r_tags
]
return all(tags_are_bold), all(tags_are_italics)
MetaData = namedtuple(
'MetaData',
[
'numbering_dict',
'relationship_dict',
'styles_dict',
'font_sizes_dict',
'image_handler',
'image_sizes',
],
)
###
# Pre-processing
###
def get_numbering_info(tree):
"""
There is a separate file called numbering.xml that stores how lists should
look (unordered, digits, lower case letters, etc.). Parse that file and
return a dictionary of what each combination should be based on list Id and
level of indentation.
"""
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
num_ids = {}
result = defaultdict(dict)
# First find all the list types
for list_type in tree.findall('%snum' % w_namespace):
list_id = list_type.get('%snumId' % w_namespace)
# Each list type is assigned an abstractNumber that defines how lists
# should look.
abstract_number = list_type.find('%sabstractNumId' % w_namespace)
num_ids[abstract_number.get('%sval' % w_namespace)] = list_id
# Loop through all the abstractNumbers
for abstract_number in tree.findall('%sabstractNum' % w_namespace):
abstract_num_id = abstract_number.get('%sabstractNumId' % w_namespace)
# If we find an abstractNumber that is not being used in the document
# then ignore it.
if abstract_num_id not in num_ids:
continue
# Get the level of the abstract number.
for lvl in abstract_number.findall('%slvl' % w_namespace):
ilvl = int(lvl.get('%silvl' % w_namespace))
lvl_format = lvl.find('%snumFmt' % w_namespace)
list_style = lvl_format.get('%sval' % w_namespace)
# Based on the list type and the ilvl (indentation level) store the
# needed style.
result[num_ids[abstract_num_id]][ilvl] = list_style
return result
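# Illustrative sketch (added for clarity, not part of the original module; the XML below
# is a hand-built minimal numbering part): numId '1' points at abstractNum '0', which
# renders ilvl 0 as a bullet list and ilvl 1 as a decimal list.
def _numbering_info_example():
    xml = (
        '<w:numbering xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
        '<w:abstractNum w:abstractNumId="0">'
        '<w:lvl w:ilvl="0"><w:numFmt w:val="bullet"/></w:lvl>'
        '<w:lvl w:ilvl="1"><w:numFmt w:val="decimal"/></w:lvl>'
        '</w:abstractNum>'
        '<w:num w:numId="1"><w:abstractNumId w:val="0"/></w:num>'
        '</w:numbering>'
    )
    tree = etree.fromstring(xml)
    # expected: {'1': {0: 'bullet', 1: 'decimal'}}
    return dict(get_numbering_info(tree))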
def get_style_dict(tree):
"""
Some things that are considered lists are actually supposed to be H tags
(h1, h2, etc.) These can be denoted by their styleId
"""
# This is a partial document and actual h1 is the document title, which
# will be displayed elsewhere.
headers = {
'heading 1': 'h2',
'heading 2': 'h3',
'heading 3': 'h4',
'heading 4': 'h5',
'heading 5': 'h6',
'heading 6': 'h6',
'heading 7': 'h6',
'heading 8': 'h6',
'heading 9': 'h6',
'heading 10': 'h6',
}
if tree is None:
return {}
w_namespace = get_namespace(tree, 'w')
result = {}
for el in tree:
style_id = el.get('%sstyleId' % w_namespace)
el_result = {
'header': False,
'font_size': None,
'based_on': None,
}
# Get the header info
name = el.find('%sname' % w_namespace)
if name is None:
continue
value = name.get('%sval' % w_namespace).lower()
if value in headers:
el_result['header'] = headers[value]
# Get the size info.
rpr = el.find('%srPr' % w_namespace)
if rpr is None:
continue
size = rpr.find('%ssz' % w_namespace)
if size is None:
el_result['font_size'] = None
else:
el_result['font_size'] = size.get('%sval' % w_namespace)
# Get based on info.
based_on = el.find('%sbasedOn' % w_namespace)
if based_on is None:
el_result['based_on'] = None
else:
el_result['based_on'] = based_on.get('%sval' % w_namespace)
result[style_id] = el_result
return result
def get_image_sizes(tree):
drawings = []
result = {}
w_namespace = get_namespace(tree, 'w')
for el in tree.iter():
if el.tag == '%sdrawing' % w_namespace:
drawings.append(el)
for d in drawings:
for el in d.iter():
if 'a' not in el.nsmap:
continue
a_namespace = get_namespace(el, 'a')
if el.tag == '%sxfrm' % a_namespace:
ext = el.find('%sext' % a_namespace)
cx = int(ext.get('cx')) / EMUS_PER_PIXEL
cy = int(ext.get('cy')) / EMUS_PER_PIXEL
result[get_image_id(d)] = (cx, cy)
return result
def get_relationship_info(tree, media, image_sizes):
"""
    There is a separate file that holds the targets for links as well as the targets
for images. Return a dictionary based on the relationship id and the
target.
"""
if tree is None:
return {}
result = {}
# Loop through each relationship.
for el in tree.iter():
el_id = el.get('Id')
if el_id is None:
continue
# Store the target in the result dict.
target = el.get('Target')
if any(
target.lower().endswith(ext) for
ext in IMAGE_EXTENSIONS_TO_SKIP):
continue
if target in media:
image_size = image_sizes.get(el_id)
target = convert_image(media[target], image_size)
        # cgi.escape will replace & < > with &amp; &lt; &gt;
result[el_id] = cgi.escape(target)
return result
def get_font_sizes_dict(tree, styles_dict):
font_sizes_dict = defaultdict(int)
# Get all the fonts sizes and how often they are used in a dict.
for p in tree.xpath('//w:p', namespaces=tree.nsmap):
# If this p tag is a natural header, skip it
if is_natural_header(p, styles_dict):
continue
if _is_li(p):
continue
font_size = get_font_size(p, styles_dict)
if font_size is None:
continue
font_sizes_dict[font_size] += 1
# Find the most used font size.
most_used_font_size = -1
highest_count = -1
for size, count in font_sizes_dict.items():
if count > highest_count:
highest_count = count
most_used_font_size = size
    # Consider the most used font size to be the 'default' font size. Any font
    # size that is larger than the default will be considered an h tag.
result = {}
for size in font_sizes_dict:
if size is None:
continue
if int(size) > int(most_used_font_size):
            # Larger than the default font size, so treat it as an h tag
result[size] = 'h2'
else:
result[size] = None
return result
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data
###
# HTML Building functions
###
def get_ordered_list_type(meta_data, numId, ilvl):
"""
Return the list type. If numId or ilvl not in the numbering dict then
default to returning decimal.
This function only cares about ordered lists, unordered lists get dealt
with elsewhere.
"""
# Early return if numId or ilvl are not valid
numbering_dict = meta_data.numbering_dict
if numId not in numbering_dict:
return DEFAULT_LIST_NUMBERING_STYLE
if ilvl not in numbering_dict[numId]:
return DEFAULT_LIST_NUMBERING_STYLE
return meta_data.numbering_dict[numId][ilvl]
def build_list(li_nodes, meta_data):
"""
Build the list structure and return the root list
"""
# Need to keep track of all incomplete nested lists.
ol_dict = {}
# Need to keep track of the current indentation level.
current_ilvl = -1
# Need to keep track of the current list id.
current_numId = -1
# Need to keep track of list that new li tags should be added too.
current_ol = None
# Store the first list created (the root list) for the return value.
root_ol = None
visited_nodes = []
list_contents = []
def _build_li(list_contents):
data = '<br />'.join(t for t in list_contents if t is not None)
return etree.XML('<li>%s</li>' % data)
def _build_non_li_content(el, meta_data):
w_namespace = get_namespace(el, 'w')
if el.tag == '%stbl' % w_namespace:
new_el, visited_nodes = build_table(el, meta_data)
return etree.tostring(new_el), visited_nodes
elif el.tag == '%sp' % w_namespace:
return get_element_content(el, meta_data), [el]
if has_text(el):
raise UnintendedTag('Did not expect %s' % el.tag)
def _merge_lists(ilvl, current_ilvl, ol_dict, current_ol):
for i in reversed(range(ilvl, current_ilvl)):
            # Any list that is more indented than ilvl needs to
            # be merged to the list before it.
            if i not in ol_dict:
                continue
            if ol_dict[i] is not current_ol:
                ol_dict[i][-1].append(current_ol)
                current_ol = ol_dict[i]
# Clean up finished nested lists.
for key in list(ol_dict):
if key > ilvl:
del ol_dict[key]
return current_ol
for li_node in li_nodes:
w_namespace = get_namespace(li_node, 'w')
if not is_li(li_node, meta_data):
# Get the content and visited nodes
new_el, el_visited_nodes = _build_non_li_content(
li_node,
meta_data,
)
list_contents.append(new_el)
visited_nodes.extend(el_visited_nodes)
continue
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Get the data needed to build the current list item
list_contents.append(get_element_content(
li_node,
meta_data,
))
ilvl = get_ilvl(li_node, w_namespace)
numId = get_numId(li_node, w_namespace)
list_type = get_ordered_list_type(meta_data, numId, ilvl)
# If the ilvl is greater than the current_ilvl or the list id is
# changing then we have the first li tag in a nested list. We need to
# create a new list object and update all of our variables for keeping
# track.
if (ilvl > current_ilvl) or (numId != current_numId):
            # Create a new list for this indentation level
ol_dict[ilvl] = create_list(list_type)
current_ol = ol_dict[ilvl]
current_ilvl = ilvl
current_numId = numId
        # If neither case above is True then we need to close all lists more
        # indented than ilvl and then remove them from the ol_dict
else:
# Merge any nested lists that need to be merged.
current_ol = _merge_lists(
ilvl=ilvl,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
# Set the root list after the first list is created.
if root_ol is None:
root_ol = current_ol
# Set the current list.
if ilvl in ol_dict:
current_ol = ol_dict[ilvl]
else:
# In some instances the ilvl is not in the ol_dict, if that is the
                # case, create it here (not sure how this happens but it has
                # happened before). Only do this if the current_ol is not the root_ol,
# otherwise etree will crash.
if current_ol is not root_ol:
# Merge the current_ol into the root_ol. _merge_lists is not
# equipped to handle this situation since the only way to get
# into this block of code is to have mangled ilvls.
root_ol[-1].append(current_ol)
# Reset the current_ol
current_ol = create_list(list_type)
# Create the li element.
visited_nodes.extend(list(li_node.iter()))
# If a list item is the last thing in a document, then you will need to add
# it here. Should probably figure out how to get the above logic to deal
# with it.
if list_contents:
li_el = _build_li(list_contents)
list_contents = []
current_ol.append(li_el)
# Merge up any nested lists that have not been merged.
current_ol = _merge_lists(
ilvl=0,
current_ilvl=current_ilvl,
ol_dict=ol_dict,
current_ol=current_ol,
)
return root_ol, visited_nodes
@ensure_tag(['tr'])
def build_tr(tr, meta_data, row_spans):
"""
This will return a single tr element, with all tds already populated.
"""
# Create a blank tr element.
tr_el = etree.Element('tr')
w_namespace = get_namespace(tr, 'w')
visited_nodes = []
for el in tr:
if el in visited_nodes:
continue
visited_nodes.append(el)
# Find the table cells.
if el.tag == '%stc' % w_namespace:
v_merge = get_v_merge(el)
# If there is a v_merge and it is not restart then this cell can be
# ignored.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) != 'restart'):
continue
# Loop through each and build a list of all the content.
texts = []
for td_content in el:
                # Since we are doing look-aheads in this loop we need to check
# again to see if we have already visited the node.
if td_content in visited_nodes:
continue
# Check to see if it is a list or a regular paragraph.
if is_li(td_content, meta_data):
# If it is a list, create the list and update
# visited_nodes.
li_nodes = get_single_list_nodes_data(
td_content,
meta_data,
)
list_el, list_visited_nodes = build_list(
li_nodes,
meta_data,
)
visited_nodes.extend(list_visited_nodes)
texts.append(etree.tostring(list_el))
elif td_content.tag == '%stbl' % w_namespace:
table_el, table_visited_nodes = build_table(
td_content,
meta_data,
)
visited_nodes.extend(table_visited_nodes)
texts.append(etree.tostring(table_el))
elif td_content.tag == '%stcPr' % w_namespace:
# Do nothing
visited_nodes.append(td_content)
continue
else:
text = get_element_content(
td_content,
meta_data,
is_td=True,
)
texts.append(text)
data = '<br />'.join(t for t in texts if t is not None)
td_el = etree.XML('<td>%s</td>' % data)
# if there is a colspan then set it here.
colspan = get_grid_span(el)
if colspan > 1:
td_el.set('colspan', '%d' % colspan)
v_merge = get_v_merge(el)
# If this td has a v_merge and it is restart then set the rowspan
# here.
if (
v_merge is not None and
v_merge.get('%sval' % w_namespace) == 'restart'):
rowspan = next(row_spans)
td_el.set('rowspan', '%d' % rowspan)
tr_el.append(td_el)
return tr_el
@ensure_tag(['tbl'])
def build_table(table, meta_data):
"""
This returns a table object with all rows and cells correctly populated.
"""
# Create a blank table element.
table_el = etree.Element('table')
w_namespace = get_namespace(table, 'w')
# Get the rowspan values for cells that have a rowspan.
row_spans = get_rowspan_data(table)
for el in table:
if el.tag == '%str' % w_namespace:
# Create the tr element.
tr_el = build_tr(
el,
meta_data,
row_spans,
)
# And append it to the table.
table_el.append(tr_el)
visited_nodes = list(table.iter())
return table_el, visited_nodes
@ensure_tag(['t'])
def get_t_tag_content(
t, parent, remove_bold, remove_italics, meta_data):
"""
    Generate the string data for this particular t tag.
"""
if t is None or t.text is None:
return ''
# Need to escape the text so that we do not accidentally put in text
# that is not valid XML.
    # cgi.escape will replace & < > with &amp; &lt; &gt;
text = cgi.escape(t.text)
# Wrap the text with any modifiers it might have (bold, italics or
# underline)
el_is_bold = not remove_bold and (
is_bold(parent) or
is_underlined(parent)
)
el_is_italics = not remove_italics and is_italics(parent)
if el_is_bold:
text = '<strong>%s</strong>' % text
if el_is_italics:
text = '<em>%s</em>' % text
return text
def _get_image_size_from_image(target):
image = Image.open(target)
return image.size
def build_hyperlink(el, meta_data):
# If we have a hyperlink we need to get relationship_id
r_namespace = get_namespace(el, 'r')
hyperlink_id = el.get('%sid' % r_namespace)
# Once we have the hyperlink_id then we need to replace the
# hyperlink tag with its child run tags.
content = get_element_content(
el,
meta_data,
remove_bold=True,
remove_italics=True,
)
if not content:
return ''
if hyperlink_id in meta_data.relationship_dict:
href = meta_data.relationship_dict[hyperlink_id]
# Do not do any styling on hyperlinks
return '<a href="%s">%s</a>' % (href, content)
return ''
def build_image(el, meta_data):
image_id = get_image_id(el)
if image_id not in meta_data.relationship_dict:
        # This image id has no entry in the relationship dictionary
return ''
src = meta_data.image_handler(
image_id,
meta_data.relationship_dict,
)
if image_id in meta_data.image_sizes:
width, height = meta_data.image_sizes[image_id]
else:
target = meta_data.relationship_dict[image_id]
width, height = _get_image_size_from_image(target)
# Make sure the width and height are not zero
if all((width, height)):
return '<img src="%s" height="%d" width="%d" />' % (
src,
height,
width,
)
else:
return '<img src="%s" />' % src
return ''
def get_text_run_content(el, meta_data, remove_bold, remove_italics):
w_namespace = get_namespace(el, 'w')
text_output = ''
for child in get_text_run_content_data(el):
if child.tag == '%st' % w_namespace:
text_output += get_t_tag_content(
child,
el,
remove_bold,
remove_italics,
meta_data,
)
elif child.tag == '%sbr' % w_namespace:
text_output += '<br />'
elif child.tag in (
'%spict' % w_namespace,
'%sdrawing' % w_namespace,
):
text_output += build_image(child, meta_data)
else:
raise SyntaxNotSupported(
'"%s" is not a supported content-containing '
'text run child.' % child.tag
)
return text_output
@ensure_tag(['p', 'ins', 'smartTag', 'hyperlink'])
def get_element_content(
p,
meta_data,
is_td=False,
remove_italics=False,
remove_bold=False,
):
"""
P tags are made up of several runs (r tags) of text. This function takes a
p tag and constructs the text that should be part of the p tag.
image_handler should be a callable that returns the desired ``src``
attribute for a given image.
"""
# Only remove bold or italics if this tag is an h tag.
# Td elements have the same look and feel as p/h elements. Right now we are
# never putting h tags in td elements, as such if we are in a td we will
# never be stripping bold/italics since that is only done on h tags
if not is_td and is_header(p, meta_data):
# Check to see if the whole line is bold or italics.
remove_bold, remove_italics = whole_line_styled(p)
p_text = ''
w_namespace = get_namespace(p, 'w')
if len(p) == 0:
return ''
# Only these tags contain text that we care about (eg. We don't care about
# delete tags)
content_tags = (
'%sr' % w_namespace,
'%shyperlink' % w_namespace,
'%sins' % w_namespace,
'%ssmartTag' % w_namespace,
)
elements_with_content = []
for child in p:
if child is None:
break
if child.tag in content_tags:
elements_with_content.append(child)
# Gather the content from all of the children
for el in elements_with_content:
# Hyperlinks and insert tags need to be handled differently than
# r and smart tags.
if el.tag in ('%sins' % w_namespace, '%ssmartTag' % w_namespace):
p_text += get_element_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
elif el.tag == '%shyperlink' % w_namespace:
p_text += build_hyperlink(el, meta_data)
elif el.tag == '%sr' % w_namespace:
p_text += get_text_run_content(
el,
meta_data,
remove_bold=remove_bold,
remove_italics=remove_italics,
)
else:
raise SyntaxNotSupported(
'Content element "%s" not handled.' % el.tag
)
# This function does not return a p tag since other tag types need this as
# well (td, li).
return p_text
def _strip_tag(tree, tag):
"""
Remove all tags that have the tag name ``tag``
"""
for el in tree.iter():
if el.tag == tag:
el.getparent().remove(el)
def get_zip_file_handler(file_path):
return ZipFile(file_path)
def read_html_file(file_path):
with open(file_path) as f:
html = f.read()
return html
def convert(file_path, image_handler=None, fall_back=None, converter=None):
"""
``file_path`` is a path to the file on the file system that you want to be
converted to html.
``image_handler`` is a function that takes an image_id and a
relationship_dict to generate the src attribute for images. (see readme
for more details)
``fall_back`` is a function that takes a ``file_path``. This function will
only be called if for whatever reason the conversion fails.
``converter`` is a function to convert a document that is not docx to docx
(examples in docx2html.converters)
Returns html extracted from ``file_path``
"""
file_base, extension = os.path.splitext(os.path.basename(file_path))
if extension == '.html' or extension == '.htm':
return read_html_file(file_path)
# Create the converted file as a file in the same dir with the
# same name only with a .docx extension
docx_path = replace_ext(file_path, '.docx')
if extension == '.docx':
        # The file is already a docx, so use it as-is.
docx_path = file_path
else:
if converter is None:
raise FileNotDocx('The file passed in is not a docx.')
converter(docx_path, file_path)
if not os.path.isfile(docx_path):
if fall_back is None:
raise ConversionFailed('Conversion to docx failed.')
else:
return fall_back(file_path)
try:
# Docx files are actually just zip files.
zf = get_zip_file_handler(docx_path)
except BadZipfile:
raise MalformedDocx('This file is not a docx')
# Need to populate the xml based on word/document.xml
tree, meta_data = _get_document_data(zf, image_handler)
return create_html(tree, meta_data)
def create_html(tree, meta_data):
# Start the return value
new_html = etree.Element('html')
w_namespace = get_namespace(tree, 'w')
visited_nodes = []
_strip_tag(tree, '%ssectPr' % w_namespace)
for el in tree.iter():
# The way lists are handled could double visit certain elements; keep
# track of which elements have been visited and skip any that have been
# visited already.
if el in visited_nodes:
continue
header_value = is_header(el, meta_data)
        if header_value:
p_text = get_element_content(el, meta_data)
if p_text == '':
continue
new_html.append(
etree.XML('<%s>%s</%s>' % (
header_value,
p_text,
header_value,
))
)
elif el.tag == '%sp' % w_namespace:
# Strip out titles.
if is_title(el):
continue
if is_li(el, meta_data):
# Parse out the needed info from the node.
li_nodes = get_single_list_nodes_data(el, meta_data)
new_el, list_visited_nodes = build_list(
li_nodes,
meta_data,
)
visited_nodes.extend(list_visited_nodes)
# Handle generic p tag here.
else:
p_text = get_element_content(el, meta_data)
                # If there is no text, do not add an empty tag.
if p_text == '':
continue
new_el = etree.XML('<p>%s</p>' % p_text)
new_html.append(new_el)
elif el.tag == '%stbl' % w_namespace:
table_el, table_visited_nodes = build_table(
el,
meta_data,
)
visited_nodes.extend(table_visited_nodes)
new_html.append(table_el)
continue
# Keep track of visited_nodes
visited_nodes.append(el)
result = etree.tostring(
new_html,
method='html',
with_tail=True,
)
return _make_void_elements_self_close(result)
def _make_void_elements_self_close(html):
#XXX Hack not sure how to get etree to do this by default.
void_tags = [
r'br',
r'img',
]
for tag in void_tags:
regex = re.compile(r'<%s.*?>' % tag)
matches = regex.findall(html)
for match in matches:
new_tag = match.strip('<>')
            new_tag = '<%s />' % new_tag
            # Use plain string replacement so regex metacharacters inside the
            # matched tag (e.g. '?' in an image src) are not reinterpreted.
            html = html.replace(match, new_tag)
return html
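# Illustrative usage sketch (added for clarity; 'report.docx' is a hypothetical path,
# not something shipped with this module):
if __name__ == '__main__':
    print convert('report.docx')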
|
bsd-3-clause
| 5,576,685,716,385,988,000
| 31.326846
| 79
| 0.567048
| false
| 3.736193
| false
| false
| false
|
pymir3/pymir3
|
mir3/modules/supervised/linear/decomposer/beta_nmf.py
|
1
|
19321
|
import argparse
import numpy
import numpy.random
import mir3.data.linear_decomposition as ld
import mir3.data.metadata as md
import mir3.data.spectrogram as spectrogram
import mir3.module
# TODO: maybe split this into 2 modules to compute activation and
# basis+activation
class BetaNMF(mir3.module.Module):
def get_help(self):
return """use beta nmf algorithm to compute the activations"""
def build_arguments(self, parser):
parser.add_argument('-b','--beta', type=float, default=2., help="""beta
value to be used by the algorithm (default:
%(default)s)""")
parser.add_argument('-i','--max-iterations', type=int, default=100,
help="""maximum number of iterations""")
parser.add_argument('-d','--min-delta', type=float, default=0.,
help="""minimum difference between iterations to
consider convergence""")
parser.add_argument('-B','--basis', type=argparse.FileType('rb'),
help="""basis file to be used""")
parser.add_argument('-s','--size', nargs=3, metavar=('SIZE',
'INSTRUMENT', 'NOTE'), help="""size of the
decomposition and instrument and note names to be
used for the basis. 'INSTRUMENT' or 'NOTE' can be
set to 'None' or 'null' to ignore that parameter""")
parser.add_argument('piece', nargs='+', help="""piece spectrogram
file""")
parser.add_argument('outfile', type=argparse.FileType('wb'),
help="""linear decomposition file""")
def run(self, args):
# Loads basis if present
if args.basis is not None:
b = ld.LinearDecomposition().load(args.basis)
else:
b = None
if args.basis is not None and b.data.right != {}:
print "Basis doesn't have empty right side. Ignoring it."
# Size of the decomposition (used when finding a basis too)
if args.size is None:
args.size = [None, None, None] # Simulate 3 values
for i in range(len(args.size)):
if args.size[i] == 'None' or args.size[i] == 'null':
args.size[i] = None
# Gather input spectrograms
s_list = []
s_meta = []
for filename in args.piece:
with open(filename, 'rb') as handler:
s_list.append(spectrogram.Spectrogram().load(handler))
s_meta.append(md.FileMetadata(handler))
# Converts arguments
size = int(args.size[0]) if args.size[0] is not None else None
instrument = args.size[1] if args.size[1] is not None else ''
note = args.size[2] if args.size[2] is not None else ''
# Decompose
d = self.compute(s_list,
size,
instrument,
note,
b,
args.beta,
args.min_delta,
args.max_iterations,
False)
# Associates an activation metadata with its given spectrogram's
# metadata
for k, data, metadata in d.right():
metadata.spectrogram_input = s_meta[k[-1]]
# Checks if basis was provided
if b is not None:
# If provided, adds it as basis metadata for each activation
meta = md.FileMetadata(args.basis)
for k, data, metadata in d.right():
metadata.basis_input = meta
else:
# Otherwise, the basis was computed right now, so we set its
# metadata with the list of all spectrograms' metadata
d.metadata.left[(args.size[1], args.size[2])].spectrogram_input = \
s_meta
d.save(args.outfile)
def compute(self, spectrograms, size=None, instrument=None, note=None,
basis=None, beta=2., min_delta=0., max_iterations=100,
save_metadata=True):
"""Computes the activation matrix from a basis matrix and a spectrogram.
Uses the beta divergence to compute the activations.
If min_delta is zero, the code may run faster because no beta divergence
is actually computed. Otherwise, the code stops computing if two
iterations of the algorithm don't improve the result by more than
min_delta.
Only one of 'basis' and 'size' arguments may be set, as they specify
different things. With 'size', the user extracts both a basis and an
activation from the spectrogram, while with 'basis' only an activation
is computed.
Each activation computed has the same key as the corresponding basis
plus the spectrogram's index in the list provided.
        If a basis is being created, its name is a tuple of (instrument, note),
even if they are None.
Args:
spectrograms: list of Spectrograms to be merged and used to compute
the activations.
size: Number of basis to extract from the spectrogram. Must be None
if the 'basis' argument is defined.
instrument: Name of the instrument. This is used only if size is
set. If None, it's ignored. Default: None.
note: Name of the note. This is used only if size is set. If None,
it's ignored. Default: None.
basis: LinearDecomposition object describing the basis to be used.
Must be none if the 'size' argument is defined.
beta: value for the beta used in divergence. Default: 2.
min_delta: threshold for early stop. Default: 0.
max_iterations: maximum number of iterations to use. Default: 100.
save_metadata: flag indicating whether the metadata should be
computed. Default: True.
Returns:
LinearDecomposition object with basis and activations for the
spectrograms.
Raises:
ValueError: matrices have incompatible sizes.
"""
# Check arguments compatibility
if size is None and basis is None:
raise ValueError("One of 'size' or 'basis' must not be None.")
if basis is not None and size is not None:
raise ValueError("Only one of 'size' or 'basis' must not be None.")
# Saves metadata
if save_metadata:
s_meta = [md.ObjectMetadata(s) for s in spectrograms]
else:
s_meta = [None for s in spectrograms]
# Marks the limits of each spectrogram
X_start = [0]
for s in spectrograms:
X_start.append(X_start[-1]+s.data.shape[1])
# Merges spectrograms
X = numpy.hstack([s.data for s in spectrograms])
# If we have a basis, we only need to compute the activations
if basis is not None:
            # Merges the basis but keeps track of where each one starts so that
            # it can be used to characterize the activations
B = []
B_start = [0]
for k, data, metadata in basis.left():
B.append(data)
B_start.append(B_start[-1]+data.shape[1])
B = numpy.hstack(B)
# Saves metadata
if save_metadata:
b_meta = md.ObjectMetadata(B)
else:
b_meta = None
            # Initializes activations
A = numpy.ones((B.shape[1], X.shape[1]))
# Computes the activation
self.compute_activation(X, B, A, beta, min_delta, max_iterations)
# Starts creating the decomposition object
d = ld.LinearDecomposition()
# Copy the left stuff from the basis, since they came from there
d.data.left = basis.data.left
d.metadata.left = basis.metadata.left
# Cuts the activation. For each combination of basis and
# spectrograms, we get an activation
i = 0
for k, data, metadata in basis.left():
for j in range(len(spectrograms)):
# Since spectrograms don't have name, we call it by its
# sequence number
s_name = (j,)
# Cuts the activation
A_cut = A[B_start[i]:B_start[i+1], X_start[j]:X_start[j+1]]
# Merges the basis key with the spectrogram name to create a
# key for the activation. Then stores a lot of metadata
# about what was used to compute it.
d.add(k+s_name,
right=A_cut,
right_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta[j],
                      spectrogram=spectrograms[j].metadata,
basis_input=b_meta,
basis=metadata))
# Increase basis iterator
i += 1
else:
# Everyone gets the same matrices to work with every time, so we
# avoid consistency problems. However, we can't have the same values
# filling the matrices or the algorithm can't separate the basis and
# activations (everyone keeps getting the same value).
numpy.random.seed(0)
B = numpy.random.rand(X.shape[0], size)
A = numpy.random.rand(size, X.shape[1])
# Computes both basis and activations
self.compute_both(X, B, A, beta, min_delta, max_iterations)
# Key for the basis created
key = (instrument, note)
# Starts creating the decomposition object
d = ld.LinearDecomposition()
# Adds basis
d.add(key,
left=B,
left_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta,
spectrogram=[s.metadata for s in spectrograms]))
            # Adds the activations, cut to match the spectrograms
for j in range(len(spectrograms)):
# Since spectrograms don't have name, we call it by its sequence
# number
s = spectrograms[j]
s_name = (j,)
# Cuts the activation
A_cut = A[:, X_start[j]:X_start[j+1]]
# Merges the basis key with the spectrogram name to create a key
# for the activation. Then stores a lot of metadata about what
# was used to compute it.
d.add(key+s_name,
right=A_cut,
right_metadata=md.Metadata(
method="beta_nmf",
beta=beta,
min_delta=min_delta,
max_iterations=max_iterations,
spectrogram_input=s_meta[j],
spectrogram=s.metadata))
return d
def compute_both(self, X, B, A, beta=2., min_delta=0., max_iterations=100):
"""Computes both the basis and activation.
Args:
X: matrix to be approximated.
B: initial guess for B.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
min_delta: minimum improvement necessary for the algorithm to
continue. Default: 0.
            max_iterations: maximum number of iterations. Default: 100.
Raises:
ValueError: matrices have incompatible sizes.
"""
# Checks shapes match
if X.shape[0] != B.shape[0] or X.shape[1] != A.shape[1]:
raise ValueError("Incompatible matrix sizes: %r = %r * %r." %
(X.shape, B.shape, A.shape))
# Makes decomposition
self.beta_nmf(1e-6+X, # Avoids near-zero values
B,
A,
beta=beta,
update_B=True,
update_A=True,
min_delta=min_delta,
max_iterations=max_iterations)
def compute_activation(self, X, B, A, beta=2., min_delta=0.,
max_iterations=100):
"""Computes both the activation for a given basis.
Args:
X: matrix to be approximated.
B: basis to be used.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
min_delta: minimum improvement necessary for the algorithm to
continue. Default: 0.
max_iterations: maximum number of iterations. Default: 100.
Raises:
ValueError: matrices have incompatible sizes.
"""
# Checks shapes match
if X.shape[0] != B.shape[0] or X.shape[1] != A.shape[1]:
raise ValueError("Incompatible matrix sizes: %r = %r * %r." %
(X.shape, B.shape, A.shape))
# Computes 100 activations at the same time for speed
# TODO: make this a parameter
step = 100
for i in range(0,X.shape[1],step):
self.beta_nmf(1e-6+X[:,i:i+step], # Avoids near-zero values
B,
A[:,i:i+step],
beta=beta,
update_B=False,
update_A=True,
min_delta=min_delta,
max_iterations=max_iterations)
def betadivergence(self, x, y, beta=2.0):
"""Computes the beta-divergence d(x|y).
The beta-divergence, as defined by Eguchi and Kano [1], is given by:
        (1/(beta*(beta-1))) * (x**beta + (beta-1)*y**beta - beta*x*y**(beta-1)),
if beta is not 0 or 1;
x * log(x/y) + (y-x), if beta=1
(x/y) - log(x/y) - 1, if beta=0
The special cases for the beta divergence are:
beta=0 -> Itakura-Saito divergence
beta=1 -> Kullback-Leibler divergence
beta=2 -> Euclidean distance
Args:
x: left side of the divergence
y: right side of the divergence
beta: value of beta used to compute. Default: 2.
Returns:
Divergence value.
"""
# Common values of beta with faster evaluation
if beta == 1:
return numpy.sum(x * numpy.log(x/y) + (y-x))
elif beta == 0:
return numpy.sum((x/y) - numpy.log(x/y) - 1)
elif beta == 2:
return numpy.sum((x-y)**2)/2.
# Magic formula for beta-divergence
beta = float(beta)
d = (1/(beta*(beta-1))) * \
numpy.sum((x**beta)+(beta-1)*(y**beta)-beta*x*(y**(beta-1)))
return d
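    # Illustrative check (added for clarity): with beta=2 the divergence reduces to half
    # the squared Euclidean distance, e.g. d([1, 2] | [1, 0]) = ((1-1)**2 + (2-0)**2)/2 = 2.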
def beta_nmf_step(self, X, B, A, beta=2.0, update_B=False, update_A=True):
"""Computes a step of a non-negative factorization towards X using B and
A as initial conditions.
X = B * A
The matrices A and B are updated in place, so any previous value is
destroyed. Because of convergence problems, only one update is performed
at a time, with A update having priority. If you want to update both,
call this twice.
Returns B, A and the error after the step was taken. Uses the
multiplicative approach as defined in:
Cedric Fevotte and Jerome Idier: Algorithms for nonnegative matrix
factorization with the beta-divergence (pg 13, eqs. 67 and 68)
Download paper at http://arxiv.org/pdf/1010.1763v3.pdf
Args:
X: matrix to be approximated.
B: initial guess for B.
A: initial guess for A.
beta: value of beta to be used. Default: 2.
update_B: flag indicating that the value of B should be updated.
Default: False.
update_A: flag indicating that the value of A should be updated.
Default: False.
"""
# Computes current approximation
Xtil = numpy.dot(B,A)
# Auxiliary variables for speed
Xtil2 = Xtil**(beta-2)
XtilNum = Xtil2*X
XtilDen = Xtil2*Xtil
if update_A:
A_numerator = numpy.dot(B.transpose(), XtilNum)
A_denominator = numpy.dot(B.transpose(), XtilDen)
A *= A_numerator/A_denominator
elif update_B:
B_numerator = numpy.dot(XtilNum, A.transpose())
B_denominator = numpy.dot(XtilDen, A.transpose())
B *= B_numerator/B_denominator
def beta_nmf(self, X, B, A, beta, update_B, update_A, min_delta,
max_iterations):
"""Performs non-negative matrix factorization for X=BA using a
beta-divergence.
The algorithm stops if either the number of iterations exceed a maximum
or the improvement is less the a threshold.
        If min_delta is 0, no beta divergence is computed and the algorithm may
run faster!
The values of B and A are updated in place.
Args:
X: matrix to be approximated.
B: initial guess for B.
A: initial guess for A.
beta: value of beta to be used.
            update_B: flag indicating that the value of B should be updated.
            update_A: flag indicating that the value of A should be updated.
            min_delta: minimum improvement necessary for the algorithm to
                continue.
            max_iterations: maximum number of iterations.
"""
# If tolerance is zero, we can skip beta divergence computation. This
# may increase speed a lot
min_delta_is_zero = (min_delta == 0)
# If we have a tolerance, compute initial values to check for
# convergence
if not min_delta_is_zero:
last_delta = 2*min_delta
curr_err = self.betadivergence(X, numpy.dot(B,A), beta)
n_iterations = 0
while (min_delta_is_zero or last_delta > min_delta) and \
n_iterations < max_iterations:
# Update the chosen matrices
if update_B and update_A:
self.beta_nmf_step(X, B, A, beta, False, True)
self.beta_nmf_step(X, B, A, beta, True, False)
else:
self.beta_nmf_step(X, B, A, beta, update_B, update_A)
# If tolerance isn't zero, we need to check for convergence
if not min_delta_is_zero:
new_err = self.betadivergence(X, numpy.dot(B, A), beta)
last_delta = curr_err-new_err
curr_err = new_err
n_iterations = n_iterations + 1
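# Illustrative sketch (added for clarity, not part of the original module; it assumes
# mir3.module.Module can be instantiated without arguments): factor a small random
# non-negative matrix with the Euclidean flavour of the algorithm (beta=2).
if __name__ == "__main__":
    numpy.random.seed(1)
    X = numpy.random.rand(20, 30)
    B = numpy.random.rand(20, 4)
    A = numpy.random.rand(4, 30)
    nmf = BetaNMF()
    before = nmf.betadivergence(X, numpy.dot(B, A), beta=2.)
    nmf.beta_nmf(1e-6+X, B, A, beta=2., update_B=True, update_A=True,
                 min_delta=0., max_iterations=50)
    after = nmf.betadivergence(X, numpy.dot(B, A), beta=2.)
    print "beta divergence before/after:", before, after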
|
mit
| 3,395,723,832,904,271,400
| 39.420502
| 80
| 0.537239
| false
| 4.367315
| false
| false
| false
|
slacker007/OFF-ToolKit
|
Modules/Correlation.py
|
1
|
1103
|
from Builders import wigile_query
from Builders import kml_builder
class ClassName():
#These are the options we will set
def __init__(self):
# Descriptions that are required!!!
self.name = "Registry Network info corelation"
self.description = "WIGLE Query your known BSSID"
self.language = "python"
self.extension = "py"
self.rating = "Excellent"
        # options we require user interaction for - format is {Option : [Value, Description]}
self.required_options = {"bssid" : ['00:22:55:DF:C8:01', "Set BSSID or MAC of AP"],
"user" : ['offtest', "Set Username to WIGLE"],
"pass" : ['83128312', "Set Password to WIGLE"]}
def startx(self):
wa = wigile_query.WigleAgent(self.required_options["user"][0], self.required_options["pass"][0])
final = wa.get_lat_lng(self.required_options["bssid"][0])
print final
kml = kml_builder.kml()
kml.build(final["lat"], final["lng"], final["bssid"]) #Pass SSID name of network
print "[*] Check output"
|
gpl-2.0
| -6,857,196,701,473,363,000
| 41.423077
| 102
| 0.600181
| false
| 3.592834
| false
| false
| false
|
ContinuumIO/dask
|
setup.py
|
2
|
2210
|
#!/usr/bin/env python
import sys
from os.path import exists
from setuptools import setup
import versioneer
# NOTE: These are tested in `continuous_integration/travis/test_imports.sh` If
# you modify these, make sure to change the corresponding line there.
extras_require = {
"array": ["numpy >= 1.13.0", "toolz >= 0.8.2"],
"bag": [
"cloudpickle >= 0.2.2",
"fsspec >= 0.6.0",
"toolz >= 0.8.2",
"partd >= 0.3.10",
],
"dataframe": [
"numpy >= 1.13.0",
"pandas >= 0.23.0",
"toolz >= 0.8.2",
"partd >= 0.3.10",
"fsspec >= 0.6.0",
],
"distributed": ["distributed >= 2.0"],
"diagnostics": ["bokeh >= 1.0.0"],
"delayed": ["cloudpickle >= 0.2.2", "toolz >= 0.8.2"],
}
extras_require["complete"] = sorted({v for req in extras_require.values() for v in req})
install_requires = ["pyyaml"]
packages = [
"dask",
"dask.array",
"dask.bag",
"dask.bytes",
"dask.dataframe",
"dask.dataframe.io",
"dask.dataframe.tseries",
"dask.diagnostics",
]
tests = [p + ".tests" for p in packages]
# Only include pytest-runner in setup_requires if we're invoking tests
if {"pytest", "test", "ptr"}.intersection(sys.argv):
setup_requires = ["pytest-runner"]
else:
setup_requires = []
setup(
name="dask",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Parallel PyData with Task Scheduling",
url="https://github.com/dask/dask/",
maintainer="Matthew Rocklin",
maintainer_email="mrocklin@gmail.com",
license="BSD",
keywords="task-scheduling parallel numpy pandas pydata",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=packages + tests,
long_description=open("README.rst").read() if exists("README.rst") else "",
python_requires=">=3.6",
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=["pytest"],
extras_require=extras_require,
include_package_data=True,
zip_safe=False,
)
|
bsd-3-clause
| -553,675,897,247,055,000
| 27.701299
| 88
| 0.60724
| false
| 3.303438
| true
| false
| false
|
whtsky/catsup-docs-zh
|
catsup/generator/__init__.py
|
1
|
4323
|
import time
import os
import catsup.parser
from catsup.logger import logger
from catsup.generator.renderer import Renderer
from catsup.reader import get_reader
from catsup.options import g
from catsup.utils import smart_copy
from catsup.models import *
class Generator(object):
def __init__(self, config_path, local=False, base_url=None):
self.config_path = config_path
self.local = local
self.base_url = base_url
g.generator = self
self.posts = []
self.pages = []
self.non_post_files = []
self.archives = []
self.tags = []
self.caches = []
self.config = {}
self.renderer = None
self.reset()
def reset(self):
self.posts = []
self.pages = []
self.non_post_files = []
self.archives = g.archives = Archives()
self.tags = g.tags = Tags()
self.load_config()
self.load_posts()
self.load_renderer()
self.caches = {
"static_url": {},
"url_for": {}
}
def load_config(self):
self.config = g.config = catsup.parser.config(
self.config_path,
local=self.local,
base_url=self.base_url
)
def load_posts(self):
for f in os.listdir(g.source):
if f.startswith("."): # hidden file
continue
filename, ext = os.path.splitext(f)
ext = ext.lower()[1:]
reader = get_reader(ext)
if reader is not None:
logger.info('Loading file %s' % filename)
path = os.path.join(g.source, f)
post = reader(path)
if post.type == "page":
self.pages.append(post)
else:
self.posts.append(post)
else:
self.non_post_files.append(f)
self.posts.sort(
key=lambda x: x.datetime,
reverse=True
)
def load_renderer(self):
templates_path = [
g.public_templates_path,
os.path.join(g.theme.path, 'templates')
]
self.renderer = Renderer(
templates_path=templates_path,
generator=self
)
def generate_feed(self):
feed = Feed(self.posts)
feed.render(self.renderer)
def generate_pages(self):
page = Page(self.posts)
page.render_all(self.renderer)
def generate_posts(self):
for post in self.posts:
post.add_archive_and_tags()
post.render(self.renderer)
for page in self.pages:
page.render(self.renderer)
def generate_tags(self):
self.tags.render(self.renderer)
def generate_archives(self):
self.archives.render(self.renderer)
def generate_other_pages(self):
NotFound().render(self.renderer)
def copy_static_files(self):
static_path = self.config.config.static_output
smart_copy(
os.path.join(g.theme.path, 'static'),
static_path
)
smart_copy(
self.config.config.static_source,
static_path
)
for f in self.non_post_files:
smart_copy(
os.path.join(g.source, f),
os.path.join(self.config.config.output, f)
)
def generate(self):
started_loading = time.time()
self.reset()
finish_loading = time.time()
logger.info(
"Loaded config and %s posts in %.3fs" %
(len(self.posts), finish_loading - started_loading)
)
if self.posts:
self.generate_posts()
self.generate_tags()
self.generate_archives()
self.generate_feed()
self.generate_pages()
else:
logger.warning("Can't find any post.")
self.generate_other_pages()
self.copy_static_files()
self.renderer.render_sitemap()
finish_generating = time.time()
logger.info(
"Generated %s posts in %.3fs" %
(len(self.posts), finish_generating - finish_loading)
)
logger.info(
"Generating finished in %.3fs" %
(finish_generating - started_loading)
)
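# Illustrative usage sketch (added for clarity; 'config.json' is a hypothetical catsup
# configuration path, not part of this module):
if __name__ == "__main__":
    Generator("config.json", local=True).generate()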
|
mit
| 2,849,377,199,365,245,000
| 27.440789
| 65
| 0.53065
| false
| 3.988007
| true
| false
| false
|
skygeek/skyproc
|
data_server/data/model_Archives.py
|
1
|
3610
|
# -*- coding: utf-8 -*-
# Copyright 2012, Nabil SEFRIOUI
#
# This file is part of Skyproc.
#
# Skyproc is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or any later version.
#
# Skyproc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skyproc. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models import Max
from django.conf import settings
import base
import fields
from choices import *
class LoadLog(base.ArchiveModel):
archive = True
location = fields.UUIDField()
aircraft = fields.UUIDField()
pilot = fields.UUIDField()
aircraft_reg = models.CharField(max_length=16)
pilot_name = models.CharField(max_length=64)
date = models.DateField()
number = models.SmallIntegerField()
total_slots = models.SmallIntegerField()
prepaid_slots = models.SmallIntegerField()
postpaid_slots = models.SmallIntegerField()
unpaid_slots = models.SmallIntegerField()
staff_slots = models.SmallIntegerField()
prices = models.CharField(max_length=512)
note = models.CharField(max_length=200, blank=True, null=True)
class SlotLog(base.ArchiveModel):
archive = True
load = models.ForeignKey('LoadLog')
jumper = fields.UUIDField(blank=True, null=True)
jumper_name = models.CharField(max_length=64)
is_worker = models.BooleanField(default=False)
catalog_item = models.CharField(max_length=64)
exit_order = models.SmallIntegerField()
catalog_price = models.CharField(max_length=64, blank=True, null=True)
payer = models.CharField(max_length=64, blank=True, null=True)
payment = models.CharField(max_length=64)
payment_type = models.CharField(max_length=16)
class JumpLog(base.ArchiveModel):
archive = True
location = fields.UUIDField(blank=True, null=True)
number = models.IntegerField(blank=True, null=True)
location_name = models.CharField(max_length=64, blank=True, null=True)
aircraft_type = models.CharField(max_length=32, blank=True, null=True)
date = models.DateField()
jump_type = models.CharField(max_length=32, blank=True, null=True)
altitude = models.CharField(max_length=32, blank=True, null=True)
note = models.CharField(max_length=100, blank=True, null=True)
def save(self, *args, **kwargs):
if not self.number:
max_number = JumpLog.objects.filter(owner=self.owner).aggregate(Max('number'))['number__max']
if max_number is None:
try:
person = models.get_model(settings.DATA_APP, 'Person').objects.get_by_natural_key(self.owner)
past_jumps = person.past_jumps
except: past_jumps = 0
self.number = past_jumps+1
else: self.number = max_number+1
super(JumpLog, self).save(*args, **kwargs)
class AccountOperationLog(base.ArchiveModel):
archive = True
location = fields.UUIDField()
date = models.DateField()
type = models.CharField(max_length=1, choices=ACCOUNT_OPERATIONS)
amount = models.CharField(max_length=64)
currency = models.CharField(max_length=5)
note = models.CharField(max_length=100)
|
agpl-3.0
| -8,264,354,205,025,495,000
| 38.67033
| 113
| 0.69446
| false
| 3.694985
| false
| false
| false
|
orting/emphysema-estimation
|
Experiments/02-ScalabilityOfClusteringAlgorithm/Scripts/RunScalability-1.py
|
1
|
2142
|
#!/usr/bin/python3
'''Run Scalability-1.
See README.md for details.
'''
import sys, subprocess, os.path
from Util import intersperse
def main():
skip = {
'Measure' : False,
}
basedir = ''
dirs = {
'Instances' : os.path.join(basedir, 'Data', 'Instances'),
'Statistics' : os.path.join(basedir, 'Data', 'Statistics', 'Scalability-1'),
'Bin' : '../../Build',
}
files = {
'Instances' : [
os.path.join(dirs['Instances'], 'instances500.csv'),
os.path.join(dirs['Instances'], 'instances1000.csv'),
os.path.join(dirs['Instances'], 'instances2500.csv'),
os.path.join(dirs['Instances'], 'instances5000.csv'),
os.path.join(dirs['Instances'], 'instances7500.csv'),
os.path.join(dirs['Instances'], 'instances10000.csv')
],
}
progs = {
'Scalability' : os.path.join(dirs['Bin'],'Experiments/02-ScalabilityOfClusteringAlgorithm/Scalability'),
}
params = {
'clusters' : [4, 8, 16, 32, 64],
'histograms' : 7*8, # 7 scales * 8 features
'burnin' : 10,
'iterations' : 100,
'branching' : 1,
}
if skip['Measure']:
print( 'Skipping: Measure' )
else:
print( 'Measuring' )
for instanceMatrix in files['Instances']:
args = [
progs['Scalability'],
'--input', instanceMatrix,
'--nHistograms', "%d" % params['histograms'],
'--output', os.path.join(dirs['Statistics'], 'stats_' + os.path.basename(instanceMatrix)),
'--burnin', "%d" % params['burnin'],
'--iterations', "%d" % params['iterations'],
'--branching', "%d" % params['branching']
] + list(intersperse('--clusters', ("%d" % k for k in params['clusters'])))
print(' '.join(args))
if subprocess.call( args ) != 0:
print( 'Error measuring', instanceMatrix )
return 1
return 0
if __name__ == '__main__':
sys.exit( main() )
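# Note on the helper used above: Util.intersperse is not shown in this file. Judging
# only from the call site, it presumably repeats the flag in front of each value so the
# command line becomes "--clusters 4 --clusters 8 ...". A minimal sketch under that
# assumption (the name and behaviour below are assumptions, not the project's helper):
def intersperse_sketch(flag, values):
    for value in values:
        yield flag
        yield value
# list(intersperse_sketch('--clusters', ("%d" % k for k in [4, 8, 16, 32, 64])))
# -> ['--clusters', '4', '--clusters', '8', '--clusters', '16', '--clusters', '32', '--clusters', '64']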
|
gpl-3.0
| -2,892,246,233,755,231,000
| 30.970149
| 112
| 0.505602
| false
| 3.852518
| false
| false
| false
|
hideoussquid/aureus-12-bitcore
|
contrib/linearize/linearize-hashes.py
|
1
|
3034
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class AureusRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = AureusRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
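# Illustration of the CONFIG-FILE format parsed in __main__ above: plain "key=value"
# lines, with '#' starting a comment line. The sample values below are invented for
# the example (re is already imported at the top of this file):
demo_lines = ["# comments are skipped", "host=127.0.0.1", "rpcuser=alice", "max_height = 313000"]
demo_settings = {}
for demo_line in demo_lines:
	if re.search('^\s*#', demo_line):
		continue
	dm = re.search('^(\w+)\s*=\s*(\S.*)$', demo_line)
	if dm:
		demo_settings[dm.group(1)] = dm.group(2)
print(demo_settings)  # {'host': '127.0.0.1', 'rpcuser': 'alice', 'max_height': '313000'} (order may vary)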
|
mit
| 1,344,201,614,436,289,500
| 25.849558
| 90
| 0.663481
| false
| 2.948494
| false
| false
| false
|
osgee/redigit
|
redigit/settings.py
|
1
|
2956
|
"""
Django settings for redigit project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SETTINGS_PATH = os.path.realpath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7p8)$=frp+7336ak^oo1verce)=ywu(&of@qvrvylw4%!kpeak'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'redigit.urls'
WSGI_APPLICATION = 'redigit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'redigit/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TEMPLATE_DIRS = (
os.path.join(SETTINGS_PATH, 'templates'),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
|
apache-2.0
| -2,283,062,519,946,228,700
| 24.491379
| 71
| 0.688769
| false
| 3.397701
| false
| false
| false
|
tobi-wan-kenobi/bumblebee-status
|
bumblebee_status/modules/contrib/twmn.py
|
1
|
1236
|
# pylint: disable=C0111,R0903
"""Toggle twmn notifications.
Requires the following executable:
* systemctl
contributed by `Pseudonick47 <https://github.com/Pseudonick47>`_ - many thanks!
"""
import core.module
import core.widget
import core.input
import core.decorators
import util.cli
class Module(core.module.Module):
@core.decorators.every(minutes=60)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(""))
self.__paused = False
# Make sure that twmn is currently not paused
util.cli.execute("killall -SIGUSR2 twmnd", ignore_errors=True)
core.input.register(self, button=core.input.LEFT_MOUSE, cmd=self.toggle_status)
def toggle_status(self, event):
self.__paused = not self.__paused
try:
if self.__paused:
util.cli.execute("systemctl --user start twmnd")
else:
util.cli.execute("systemctl --user stop twmnd")
except:
self.__paused = not self.__paused # toggling failed
def state(self, widget):
if self.__paused:
return ["muted"]
return ["unmuted"]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
mit
| -1,048,578,473,358,335,700
| 25.869565
| 87
| 0.632686
| false
| 3.734139
| false
| false
| false
|
xArm-Developer/xArm-Python-SDK
|
example/wrapper/xarm7/2004-move_joint.py
|
1
|
2851
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
"""
Description: Move Joint
"""
import os
import sys
import time
import math
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from xarm.wrapper import XArmAPI
#######################################################
"""
Just for test example
"""
if len(sys.argv) >= 2:
ip = sys.argv[1]
else:
try:
from configparser import ConfigParser
parser = ConfigParser()
parser.read('../robot.conf')
ip = parser.get('xArm', 'ip')
except:
ip = input('Please input the xArm ip address:')
if not ip:
print('input error, exit')
sys.exit(1)
########################################################
arm = XArmAPI(ip, is_radian=True)
arm.motion_enable(enable=True)
arm.set_mode(0)
arm.set_state(state=0)
arm.reset(wait=True)
speed = 50
arm.set_servo_angle(servo_id=1, angle=90, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=-60, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=-30, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=1, angle=0, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=0, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=0, speed=speed, is_radian=False, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.reset(wait=True)
speed = math.radians(50)
arm.set_servo_angle(servo_id=1, angle=math.radians(90), speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=math.radians(-60), speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=math.radians(-30), speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=1, angle=0, speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=3, angle=0, speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.set_servo_angle(servo_id=2, angle=0, speed=speed, wait=True)
print(arm.get_servo_angle(), arm.get_servo_angle(is_radian=False))
arm.reset(wait=True)
arm.disconnect()
|
bsd-3-clause
| -8,415,697,118,851,235,000
| 33.768293
| 83
| 0.677306
| false
| 2.762597
| false
| false
| false
|
laenderoliveira/exerclivropy
|
exercicios_resolvidos/capitulo 05/exercicio-05-28.py
|
1
|
1772
|
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2014
# First edition - November/2010 - ISBN 978-85-7522-250-8
# First reprint - October/2011
# Second reprint - November/2012
# Third reprint - August/2013
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Site: http://python.nilo.pro.br/
#
# File: exercicios_resolvidos\capitulo 05\exercicio-05-28.py
##############################################################################
# Exercise 5.27
# Alternative solution, using integers only
n = int(input("Enter the number to check:"))
# Since n is an integer, we first compute its number of digits by finding
# the first power of 10 that is not smaller than n.
# Example: for 341 the loop stops at 10 ** 3 = 1000, so q = 3,
# which is the number of digits of 341; single-digit numbers are
# handled by the same formulas below.
q = 0
while 10 ** q < n:
q = q + 1
i = q
f = 0
nf = ni = n # Here we copy n into ni and nf
pi = pf = 0 # and initialize pi = pf (for the special cases)
while i > f:
    pi = int(ni / (10 ** (i-1))) # Leftmost (most significant) digit
    pf = nf % 10 # Rightmost (least significant) digit
    if pi != pf: # If they differ, stop
        break
    f = f + 1 # One more digit checked from the right
    i = i - 1 # One more digit checked from the left
    ni = ni - (pi * (10 ** i)) # Adjust ni to drop the digit just compared
    nf = int(nf / 10) # Adjust nf to drop its last digit
if pi == pf:
    print("%d is a palindrome" % n)
else:
    print("%d is not a palindrome" % n)
|
mit
| -280,417,563,851,955,170
| 38.25
| 83
| 0.607991
| false
| 2.524854
| false
| false
| false
|
ToFuProject/tofu
|
tofu/tests/tests01_geom/test_03_core_data/WEST_Ves_VesOut_Notes.py
|
1
|
1355
|
#!/usr/bin/env python
import os
import argparse
import numpy as np
_save = True
_here = os.path.abspath(os.path.dirname(__file__))
_Exp, _Cls, _name = os.path.split(__file__)[1].split('_')[:3]
assert not any([any([ss in s for ss in ['Notes','.']])
for s in [_Exp, _Cls, _name]])
def get_notes():
# Notes from creoView (-X,Z,Y)
notes = {'C': np.r_[2.465, 0.],
'r_in': 3.162/2., # r_out for later use (thick)
'r_out': 3.292/2.}
return notes
def make_Poly(save=_save, path=_here):
notes = get_notes()
C = notes['C']
nP = 100
theta = np.linspace(0.,2*np.pi, nP, endpoint=False)
P = np.array([C[0]+notes['r_out']*np.cos(theta),
C[1]+notes['r_out']*np.sin(theta)])
if save:
cstr = '%s_%s_%s'%(_Exp,_Cls,_name)
pathfilext = os.path.join(path, cstr+'_V0.txt')
np.savetxt(pathfilext, P)
return P, notes
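# Quick sanity sketch (an added illustration, not part of the original notes): every
# point of the generated outline should lie at distance r_out from the centre C.
def _check_radius():
    P, notes = make_Poly(save=False)
    return np.allclose(np.hypot(P[0] - notes['C'][0], P[1] - notes['C'][1]),
                       notes['r_out'])
# _check_radius() -> True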
if __name__=='__main__':
# Parse input arguments
msg = 'Launch creation of polygons txt from bash'
parser = argparse.ArgumentParser(description = msg)
parser.add_argument('-save', type=bool, help='save ?', default=_save)
parser.add_argument('-path', type=str, help='saving path ?', default=_here)
args = parser.parse_args()
# Call wrapper function
make_Poly(save=args.save, path=args.path)
|
mit
| -8,711,176,521,942,796,000
| 25.057692
| 79
| 0.57048
| false
| 3.024554
| false
| false
| false
|
miooim/project_hours
|
src/project_hours/driver/mongo_driver.py
|
1
|
2290
|
import datetime
import pprint
__author__ = 'michaell'
import pymongo
from tornado.options import options
class ProjectHoursMongoDriver(object):
"""
Project hours mongo driver implementation
"""
@staticmethod
def get_month_data(month, year, user):
"""
Get results from database
:param month: month
:type month: int
:param year: year
:type year: int
:param user: user name
:type user: str
:return: result dictionary
:rtype: dict
"""
query = {
"$query": {
'month': int(month),
'year': int(year),
'user': user
}
}
# print(options.mongodb, options.mongod_name, options.mongod_name)
# pprint.pprint(query)
collection = pymongo.MongoClient(host=options.mongodb)[options.mongod_name][options.mongod_name]
return collection.find_one(query)
@staticmethod
def save(month, year, user, data):
"""
Saves data to mongod
:param month: month
:type month: int
:param year: year
:type year: int
:param user: user name
:type user: str
:param data: data to save
:type data: dict
        :return: True if successful
:rtype: bool
"""
for item in data:
if 'no_work' in item:
if item['no_work'] is True:
item['projects'] = []
item['total'] = 0
result = ProjectHoursMongoDriver.get_month_data(month, year, user)
if result:
to_save = {
'_id': result['_id'],
'month': int(month),
'year': int(year),
'user': user,
"days": data,
"timestamp": datetime.datetime.now(),
}
else:
to_save = {
'month': int(month),
'year': int(year),
'user': user,
"days": data,
"timestamp": datetime.datetime.now(),
}
collection = pymongo.MongoClient(host=options.mongodb)[options.mongod_name][options.mongod_name]
return collection.save(to_save=to_save, safe=True)
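# Shape of the documents this driver reads and writes, with made-up example values.
# The field names mirror save() above; the structure of the individual 'days' entries
# is decided by the web layer, so the inner dict here is only an assumption.
EXAMPLE_DOCUMENT = {
    'month': 3,
    'year': 2014,
    'user': 'michaell',
    'days': [{'no_work': True, 'projects': [], 'total': 0}],
    'timestamp': datetime.datetime(2014, 3, 31, 18, 0),
}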
|
mit
| -589,547,220,272,757,100
| 27.271605
| 104
| 0.501747
| false
| 4.312618
| false
| false
| false
|
asd43/Structural-Variation
|
popgen/getFeatureTable.py
|
1
|
2289
|
#!/usr/bin/env python3
# Copyright (c) 2017 Genome Research Ltd.
# Author: Alistair Dunham
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License , or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful , but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
## Script to convert bedtools cluster output of merged breakdancer calls into a feature table
## which has columns for each sample indicating the presence of each deletion
import argparse
import fileinput
import re
## Read arguments
parser = argparse.ArgumentParser(description="Transform bedtools cluster output for deletion calls into a feature table of 'genotypes'.")
parser.add_argument('tenx',metavar='T',type=str,help="Bed file containing clustered deletion calls")
parser.add_argument('--bd','-b',action='store_true',help="Expect BreakDancer formatted IDs. Otherwise expect 10X formatted IDs.")
args = parser.parse_args()
## Determine function to use for setting sample ID depending on given source format
if args.bd:
def getSample(x):
"""Extract sample from BreakDancer formatted ID tags"""
return(re.split("[_.]",x)[-2])
else:
def getSample(x):
"""Extract sample from 10X formatted ID tags"""
return(x.split('.')[0])
## Extract each deletion call and its cluster number
dels = []
samples = set()
with fileinput.input(args.tenx) as bed:
for li in bed:
t = li.strip().split()
s = getSample(t[3])
n = int(t[4])
samples.add(s)
if len(dels) < n:
dels.append(set([s]))
else:
dels[n - 1].add(s)
## Print feature table
samples = sorted(list(samples))
print("Deletion",*samples,sep='\t')
for n,delSamples in enumerate(dels):
## generate feature string
feats = [(1 if i in delSamples else 0) for i in samples]
print('_'.join(["del",str(n + 1)]),*feats,sep='\t')
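# Worked illustration of the transformation above, using invented sample names and
# cluster numbers: three calls from two samples in two clusters become one 0/1 row
# per deletion cluster.
example_calls = [('sampleA', 1), ('sampleB', 1), ('sampleB', 2)]  # (sample, cluster)
example_dels = []
example_samples = set()
for s, n in example_calls:
	example_samples.add(s)
	if len(example_dels) < n:
		example_dels.append({s})
	else:
		example_dels[n - 1].add(s)
example_samples = sorted(example_samples)
print("Deletion", *example_samples, sep='\t')
for n, clusterSamples in enumerate(example_dels):
	feats = [(1 if s in clusterSamples else 0) for s in example_samples]
	print('_'.join(["del", str(n + 1)]), *feats, sep='\t')
# Expected rows: del_1 -> sampleA=1, sampleB=1; del_2 -> sampleA=0, sampleB=1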
|
gpl-3.0
| -661,282,732,030,508,800
| 37.15
| 137
| 0.70817
| false
| 3.361233
| false
| false
| false
|
2-B/etherpad-lite
|
bin/parsejson.py
|
1
|
1119
|
import json
import re
# Regular expression for comments
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
def parse_json(filename):
""" Parse a JSON file
First remove comments and then use the json module package
Comments look like :
// ...
or
/*
...
*/
"""
with open(filename) as f:
content = ''.join(f.readlines())
## Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
#print content
# Return json file
return json.loads(content)
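# Quick illustration of the comment stripping on an in-memory string. The JSON content
# below is invented for the example; parse_json() itself works on a real settings file.
def _demo_strip_comments():
    text = '{\n  // a line comment\n  "log": "/var/log/etherpad.log", /* block */\n  "errorHandling": true\n}'
    match = comment_re.search(text)
    while match:
        text = text[:match.start()] + text[match.end():]
        match = comment_re.search(text)
    return json.loads(text)
# _demo_strip_comments() -> {'log': '/var/log/etherpad.log', 'errorHandling': True}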
if __name__ == '__main__':
f = '../settings.json'
data = parse_json(f)
print "LOG="+data['log']
print "ERROR_HANDLING="+str(data['errorHandling'])
print "EMAIL_ADDRESS="+data['emailAddress']
print "TIME_BETWEEN_EMAILS="+str(data['timeBetweenEmails'])
print "NODEJS="+data['nodejs']
|
apache-2.0
| -6,526,917,865,432,967,000
| 25.023256
| 69
| 0.540661
| false
| 3.832192
| false
| false
| false
|
bitwiseman/js-beautify
|
python/cssbeautifier/css/options.py
|
1
|
2360
|
#
# The MIT License (MIT)
# Copyright (c) 2007-2018 Einar Lielmanis, Liam Newman, and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from jsbeautifier.core.options import Options as BaseOptions
class BeautifierOptions(BaseOptions):
def __init__(self, options=None):
BaseOptions.__init__(self, options, "css")
self.selector_separator_newline = self._get_boolean(
"selector_separator_newline", True
)
self.newline_between_rules = self._get_boolean("newline_between_rules", True)
brace_style_split = self._get_selection_list(
"brace_style",
["collapse", "expand", "end-expand", "none", "preserve-inline"],
)
self.brace_style = "collapse"
for bs in brace_style_split:
if bs != "expand":
# default to collapse, as only collapse|expand is implemented for now
self.brace_style = "collapse"
else:
self.brace_style = bs
# deprecated
space_around_selector_separator = self._get_boolean(
"space_around_selector_separator"
)
# Continue to accept deprecated option
self.space_around_combinator = (
self._get_boolean("space_around_combinator")
or space_around_selector_separator
)
|
mit
| 4,209,632,697,914,879,500
| 39
| 85
| 0.680508
| false
| 4.322344
| false
| false
| false
|
openstack/python-heatclient
|
heatclient/osc/v1/software_deployment.py
|
1
|
13006
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Orchestration v1 Software Deployment action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions as exc
from osc_lib import utils
from oslo_serialization import jsonutils
from heatclient._i18n import _
from heatclient.common import deployment_utils
from heatclient.common import format_utils
from heatclient.common import utils as heat_utils
from heatclient import exc as heat_exc
class CreateDeployment(format_utils.YamlFormat):
"""Create a software deployment."""
log = logging.getLogger(__name__ + '.CreateDeployment')
def get_parser(self, prog_name):
parser = super(CreateDeployment, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<deployment-name>',
help=_('Name of the derived config associated with this '
'deployment. This is used to apply a sort order to the '
'list of configurations currently deployed to the server.')
)
parser.add_argument(
'--input-value',
metavar='<key=value>',
action='append',
help=_('Input value to set on the deployment. This can be '
'specified multiple times.')
)
parser.add_argument(
'--action',
metavar='<action>',
default='UPDATE',
help=_('Name of an action for this deployment. This can be a '
'custom action, or one of CREATE, UPDATE, DELETE, SUSPEND, '
'RESUME. Default is UPDATE')
)
parser.add_argument(
'--config',
metavar='<config>',
help=_('ID of the configuration to deploy')
)
parser.add_argument(
'--signal-transport',
metavar='<signal-transport>',
default='TEMP_URL_SIGNAL',
help=_('How the server should signal to heat with the deployment '
'output values. TEMP_URL_SIGNAL will create a Swift '
'TempURL to be signaled via HTTP PUT. ZAQAR_SIGNAL will '
'create a dedicated zaqar queue to be signaled using the '
'provided keystone credentials.NO_SIGNAL will result in '
'the resource going to the COMPLETE state without waiting '
'for any signal')
)
parser.add_argument(
'--container',
metavar='<container>',
help=_('Optional name of container to store TEMP_URL_SIGNAL '
'objects in. If not specified a container will be created '
'with a name derived from the DEPLOY_NAME')
)
parser.add_argument(
'--timeout',
metavar='<timeout>',
type=int,
default=60,
help=_('Deployment timeout in minutes')
)
parser.add_argument(
'--server',
metavar='<server>',
required=True,
help=_('ID of the server being deployed to')
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)', parsed_args)
client = self.app.client_manager.orchestration
config = {}
if parsed_args.config:
try:
config = client.software_configs.get(parsed_args.config)
except heat_exc.HTTPNotFound:
msg = (_('Software configuration not found: %s') %
parsed_args.config)
raise exc.CommandError(msg)
derived_params = deployment_utils.build_derived_config_params(
parsed_args.action,
config,
parsed_args.name,
heat_utils.format_parameters(parsed_args.input_value, False),
parsed_args.server,
parsed_args.signal_transport,
signal_id=deployment_utils.build_signal_id(client, parsed_args)
)
derived_config = client.software_configs.create(**derived_params)
sd = client.software_deployments.create(
config_id=derived_config.id,
server_id=parsed_args.server,
action=parsed_args.action,
status='IN_PROGRESS'
)
return zip(*sorted(sd.to_dict().items()))
class DeleteDeployment(command.Command):
"""Delete software deployment(s) and correlative config(s)."""
log = logging.getLogger(__name__ + '.DeleteDeployment')
def get_parser(self, prog_name):
parser = super(DeleteDeployment, self).get_parser(prog_name)
parser.add_argument(
'deployment',
metavar='<deployment>',
nargs='+',
help=_('ID of the deployment(s) to delete.')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
hc = self.app.client_manager.orchestration
failure_count = 0
for deploy_id in parsed_args.deployment:
try:
sd = hc.software_deployments.get(deployment_id=deploy_id)
hc.software_deployments.delete(
deployment_id=deploy_id)
except Exception as e:
if isinstance(e, heat_exc.HTTPNotFound):
print(_('Deployment with ID %s not found') % deploy_id)
else:
print(_('Deployment with ID %s failed to delete')
% deploy_id)
failure_count += 1
continue
            # best effort: try to delete the corresponding config
try:
config_id = getattr(sd, 'config_id')
hc.software_configs.delete(config_id=config_id)
except Exception:
print(_('Failed to delete the correlative config'
' %(config_id)s of deployment %(deploy_id)s') %
{'config_id': config_id, 'deploy_id': deploy_id})
if failure_count:
raise exc.CommandError(_('Unable to delete %(count)s of the '
'%(total)s deployments.') %
{'count': failure_count,
'total': len(parsed_args.deployment)})
class ListDeployment(command.Lister):
"""List software deployments."""
log = logging.getLogger(__name__ + '.ListDeployment')
def get_parser(self, prog_name):
parser = super(ListDeployment, self).get_parser(prog_name)
parser.add_argument(
'--server',
metavar='<server>',
help=_('ID of the server to fetch deployments for')
)
parser.add_argument(
'--long',
action='store_true',
help=_('List more fields in output')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
return _list_deployment(heat_client, args=parsed_args)
def _list_deployment(heat_client, args=None):
kwargs = {'server_id': args.server} if args.server else {}
columns = ['id', 'config_id', 'server_id', 'action', 'status']
if args.long:
columns.append('creation_time')
columns.append('status_reason')
deployments = heat_client.software_deployments.list(**kwargs)
return (
columns,
(utils.get_item_properties(s, columns) for s in deployments)
)
class ShowDeployment(command.ShowOne):
"""Show SoftwareDeployment Details."""
log = logging.getLogger(__name__ + ".ShowSoftwareDeployment")
def get_parser(self, prog_name):
parser = super(ShowDeployment, self).get_parser(prog_name)
parser.add_argument(
'deployment',
metavar='<deployment>',
help=_('ID of the deployment')
)
parser.add_argument(
'--long',
action='store_true',
help=_('Show more fields in output')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
try:
data = heat_client.software_deployments.get(
deployment_id=parsed_args.deployment)
except heat_exc.HTTPNotFound:
raise exc.CommandError(
_('Software Deployment not found: %s')
% parsed_args.deployment)
else:
columns = [
'id',
'server_id',
'config_id',
'creation_time',
'updated_time',
'status',
'status_reason',
'input_values',
'action',
]
if parsed_args.long:
columns.append('output_values')
return columns, utils.get_item_properties(data, columns)
class ShowMetadataDeployment(command.Command):
"""Get deployment configuration metadata for the specified server."""
log = logging.getLogger(__name__ + '.ShowMetadataDeployment')
def get_parser(self, prog_name):
parser = super(ShowMetadataDeployment, self).get_parser(prog_name)
parser.add_argument(
'server',
metavar='<server>',
help=_('ID of the server to fetch deployments for')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
md = heat_client.software_deployments.metadata(
server_id=parsed_args.server)
print(jsonutils.dumps(md, indent=2))
class ShowOutputDeployment(command.Command):
"""Show a specific deployment output."""
log = logging.getLogger(__name__ + '.ShowOutputDeployment')
def get_parser(self, prog_name):
parser = super(ShowOutputDeployment, self).get_parser(prog_name)
parser.add_argument(
'deployment',
metavar='<deployment>',
help=_('ID of deployment to show the output for')
)
parser.add_argument(
'output',
metavar='<output-name>',
nargs='?',
default=None,
help=_('Name of an output to display')
)
parser.add_argument(
'--all',
default=False,
action='store_true',
help=_('Display all deployment outputs')
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='Show full deployment logs in output',
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
if (not parsed_args.all and parsed_args.output is None or
parsed_args.all and parsed_args.output is not None):
raise exc.CommandError(
_('Error: either %(output)s or %(all)s argument is needed.')
% {'output': '<output-name>', 'all': '--all'})
try:
sd = heat_client.software_deployments.get(
deployment_id=parsed_args.deployment)
except heat_exc.HTTPNotFound:
raise exc.CommandError(_('Deployment not found: %s')
% parsed_args.deployment)
outputs = sd.output_values
if outputs:
if parsed_args.all:
print('output_values:\n')
for k in outputs:
format_utils.print_software_deployment_output(
data=outputs, name=k, long=parsed_args.long)
else:
if parsed_args.output not in outputs:
msg = (_('Output %(output)s does not exist in deployment'
' %(deployment)s')
% {'output': parsed_args.output,
'deployment': parsed_args.deployment})
raise exc.CommandError(msg)
else:
print('output_value:\n')
format_utils.print_software_deployment_output(
data=outputs, name=parsed_args.output)
|
apache-2.0
| 8,401,926,805,075,033,000
| 35.533708
| 79
| 0.557204
| false
| 4.55871
| true
| false
| false
|
masc3d/btrfs-sxbackup
|
btrfs_sxbackup/shell.py
|
1
|
2492
|
# Copyright (c) 2014 Marco Schindler
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
import subprocess
import logging
_logger = logging.getLogger(__name__)
def build_subprocess_args(cmd, url=None):
"""
Create subprocess arguments for shell command/args to be executed
    Internally wraps the command in an ssh call if the url host name is not None
:param cmd: Shell command string or argument list
:param url: url of remote host
:return: Subprocess arguments
"""
# in case cmd is a regular value, convert to list
cmd = cmd if isinstance(cmd, list) else [cmd]
    # wrap into a bash or ssh command respectively,
    # depending on whether the command is executed locally (host==None) or remotely
url_string = None
ssh_args = ['ssh', '-o', 'ServerAliveInterval=5', '-o', 'ServerAliveCountMax=3']
if url is not None and url.hostname is not None:
url_string = url.hostname
if url.username is not None:
url_string = '%s@%s' % (url.username, url.hostname)
if url.port is not None:
ssh_args += ['-p', '%s' % url.port]
ssh_args += ['%s' % url_string]
subprocess_args = ['bash', '-c'] + cmd if url_string is None else \
ssh_args + cmd
_logger.debug(subprocess_args)
return subprocess_args
def exec_check_output(cmd, url=None) -> bytes:
"""
Wrapper for subprocess.check_output
:param cmd: Command text
:param url: URL
:return: output
"""
return subprocess.check_output(build_subprocess_args(cmd, url), stderr=subprocess.STDOUT)
def exec_call(cmd, url=None) -> int:
"""
Wrapper for subprocess.call
:param cmd: Command text
:param url: URL
:return:
"""
return subprocess.call(build_subprocess_args(cmd, url), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
def exists(command, url=None):
"""
Check if shell command exists
:param command: Command to verify
:param url: url of remote host
    :return: True if the command exists, otherwise False
"""
type_prc = subprocess.Popen(build_subprocess_args(['type ' + command], url),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
return type_prc.wait() == 0
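# Minimal usage sketch. The command string and the ssh:// url below are invented for
# the example (a real caller passes its own); it shows how build_subprocess_args()
# wraps a command locally versus over ssh.
if __name__ == '__main__':
    from urllib.parse import urlsplit
    print(build_subprocess_args('btrfs subvolume list /'))
    # ['bash', '-c', 'btrfs subvolume list /']
    print(build_subprocess_args('btrfs subvolume list /', urlsplit('ssh://root@nas:2222')))
    # ['ssh', '-o', 'ServerAliveInterval=5', '-o', 'ServerAliveCountMax=3', '-p', '2222', 'root@nas', 'btrfs subvolume list /']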
|
gpl-2.0
| 3,123,260,312,765,524,000
| 30.15
| 107
| 0.642055
| false
| 3.968153
| false
| false
| false
|
Petr-By/qtpyvis
|
dltb/tool/adversarial.py
|
1
|
1626
|
"""Tools for creating and definding againts adversarial examples.
"""
# third party imports
import numpy as np
# Toolbox imports
from ..datasource import Datasource
from .tool import Tool
from .classifier import Classifier, ClassIdentifier
class Attacker(Tool):
# pylint: disable=too-many-ancestors
"""An attacker can create adversarial attacks ("adversarial examples")
for a given victim. Currently we assume that the victim is a
classifier.
"""
def attack(self, victim: Classifier, example: np.ndarray,
correct: ClassIdentifier, target: ClassIdentifier = None,
**kwargs) -> np.ndarray:
"""
Perform an attack against a victim.
Arguments
---------
victim: Classifier
The victim (classifier) to attack.
example: np.ndarray
The example that should be altered.
correct: ClassIdentifier
Correct class label for the example.
target: ClassIdentifier
Target label for the attack. If none is given,
an untargeted attack is done.
Result
------
adversarial_example: np.ndarray
The adversarial example created by the attack.
"""
class Defender(Tool):
# pylint: disable=too-many-ancestors
"""A :py:class:`Defender` aims at making a victim more robust
against adversarial attacks.
"""
def defend(self, victim: Classifier, attacker: Attacker = None,
datasource: Datasource = None, **kwargs) -> None:
"""Defend the victim against adversarial attacks.
"""
|
mit
| -8,583,027,495,194,409,000
| 27.526316
| 74
| 0.633456
| false
| 4.179949
| false
| false
| false
|
cvandeplas/plaso
|
plaso/parsers/firefox_cache_test.py
|
1
|
6803
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Firefox cache files parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import firefox_cache as firefox_cache_formatter
from plaso.lib import errors
from plaso.lib import timelib_test
from plaso.parsers import firefox_cache
from plaso.parsers import test_lib
__author__ = 'Petter Bjelland (petter.bjelland@gmail.com)'
class FirefoxCacheTest(test_lib.ParserTestCase):
"""A unit test for the FirefoxCacheParser."""
def setUp(self):
self._parser = firefox_cache.FirefoxCacheParser()
def VerifyMajorMinor(self, events):
"""Verify that valid Firefox cahce version is extracted."""
for event_object in events:
self.assertEquals(event_object.major, 1)
self.assertEquals(event_object.minor, 19)
def testParseCache_InvalidFile(self):
"""Verify that parser do not accept small, invalid files."""
test_file = self._GetTestFilePath(['firefox_cache', 'invalid_file'])
with self.assertRaises(errors.UnableToParseFile):
_ = self._ParseFile(self._parser, test_file)
def testParseCache_001(self):
"""Test Firefox 28 cache file _CACHE_001_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'_CACHE_001_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(574, len(event_objects))
self.assertEquals(event_objects[1].url,
'HTTP:http://start.ubuntu.com/12.04/sprite.png')
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-04-21 14:13:35'))
self.VerifyMajorMinor(event_objects)
expected_msg = (
u'Fetched 2 time(s) '
u'[HTTP/1.0 200 OK] GET '
u'"HTTP:http://start.ubuntu.com/12.04/sprite.png"')
expected_msg_short = (
u'[HTTP/1.0 200 OK] GET '
u'"HTTP:http://start.ubuntu.com/12.04/sprite.png"')
self._TestGetMessageStrings(event_objects[1],
expected_msg, expected_msg_short)
def testParseCache_002(self):
"""Test Firefox 28 cache file _CACHE_002_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'_CACHE_002_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(58, len(event_objects))
self.assertEquals(event_objects[2].url,
('HTTP:http://www.google-analytics.com/__utm.gif?utmwv=5.5.0&utms='
'1&utmn=1106893631&utmhn=www.dagbladet.no&utmcs=windows-1252&ut'
'msr=1920x1080&utmvp=1430x669&utmsc=24-bit&utmul=en-us&utmje=0&'
'utmfl=-&utmdt=Dagbladet.no%20-%20forsiden&utmhid=460894302&utm'
'r=-&utmp=%2F&utmht=1398089458997&utmac=UA-3072159-1&utmcc=__ut'
'ma%3D68537988.718312608.1398089459.1398089459.1398089459.1%3B%'
'2B__utmz%3D68537988.1398089459.1.1.utmcsr%3D(direct)%7Cutmccn'
'%3D(direct)%7Cutmcmd%3D(none)%3B&aip=1&utmu=qBQ~'))
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-04-21 14:10:58'))
self.VerifyMajorMinor(event_objects)
def testParseCache_003(self):
"""Test Firefox 28 cache file _CACHE_003_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'_CACHE_003_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(4, len(event_objects))
self.assertEquals(event_objects[3].url,
'HTTP:https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js')
self.assertEquals(event_objects[3].timestamp,
timelib_test.CopyStringToTimestamp('2014-04-21 14:11:07'))
self.VerifyMajorMinor(event_objects)
def testParseAlternativeFilename(self):
"""Test Firefox 28 cache 003 file with alternative filename."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox28',
'E8D65m01'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(4, len(event_objects))
def testParseLegacyCache_001(self):
"""Test Firefox 3 cache file _CACHE_001_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox3',
'_CACHE_001_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(25, len(event_objects))
self.assertEquals(event_objects[0].timestamp,
timelib_test.CopyStringToTimestamp('2014-05-02 14:15:03'))
expected_msg = (
u'Fetched 1 time(s) '
u'[HTTP/1.1 200 OK] GET '
u'"HTTP:http://start.mozilla.org/en-US/"')
expected_msg_short = (
u'[HTTP/1.1 200 OK] GET '
u'"HTTP:http://start.mozilla.org/en-US/"')
self._TestGetMessageStrings(event_objects[0],
expected_msg, expected_msg_short)
def testParseLegacyCache_002(self):
"""Test Firefox 3 cache file _CACHE_002_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox3',
'_CACHE_002_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(3, len(event_objects))
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-05-02 14:25:55'))
def testParseLegacyCache_003(self):
"""Test Firefox 3 cache file _CACHE_003_ parsing."""
test_file = self._GetTestFilePath(['firefox_cache', 'firefox3',
'_CACHE_003_'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEquals(2, len(event_objects))
self.assertEquals(event_objects[1].timestamp,
timelib_test.CopyStringToTimestamp('2014-05-02 14:15:07'))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -3,736,644,465,021,609,000
| 35.772973
| 80
| 0.694694
| false
| 3.344641
| true
| false
| false
|
Pikecillo/genna
|
external/PyXML-0.8.4/xml/xpath/ParsedAbbreviatedRelativeLocationPath.py
|
1
|
2141
|
########################################################################
#
# File Name: ParsedAbbreviatedRelativeLocationPath.py
#
#
"""
A parsed token that represents an abbreviated relative location path.
WWW: http://4suite.org/XPATH e-mail: support@4suite.org
Copyright (c) 2000-2001 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.org/COPYRIGHT for license and copyright information
"""
from xml.xpath import ParsedNodeTest
from xml.xpath import ParsedPredicateList
from xml.xpath import ParsedAxisSpecifier
from xml.xpath import ParsedStep
import Set
class ParsedAbbreviatedRelativeLocationPath:
def __init__(self,left,right):
"""
left can be a step or a relative location path
right is only a step
"""
self._left = left
self._right = right
pnt = ParsedNodeTest.ParsedNodeTest('node','')
ppl = ParsedPredicateList.ParsedPredicateList([])
pas = ParsedAxisSpecifier.ParsedAxisSpecifier('descendant-or-self')
self._middle = ParsedStep.ParsedStep(pas, pnt, ppl)
def evaluate(self, context):
res = []
rt = self._left.select(context)
l = len(rt)
origState = context.copyNodePosSize()
for ctr in range(l):
context.setNodePosSize((rt[ctr],ctr+1,l))
subRt = self._middle.select(context)
res = Set.Union(res,subRt)
rt = res
res = []
l = len(rt)
for ctr in range(l):
context.setNodePosSize((rt[ctr],ctr+1,l))
subRt = self._right.select(context)
res = Set.Union(res,subRt)
context.setNodePosSize(origState)
return res
select = evaluate
def pprint(self, indent=''):
print indent + str(self)
self._left.pprint(indent + ' ')
self._middle.pprint(indent + ' ')
self._right.pprint(indent + ' ')
def __str__(self):
return '<AbbreviatedRelativeLocationPath at %x: %s>' % (
id(self),
repr(self),
)
def __repr__(self):
return repr(self._left) + '//' + repr(self._right)
|
gpl-2.0
| 2,580,934,514,517,384,700
| 29.15493
| 75
| 0.586175
| false
| 3.723478
| false
| false
| false
|
b1r3k/recruitment-challanges
|
data-hacking/src/task1/classifier/channel.py
|
1
|
1678
|
'''
* Author: Lukasz Jachym
* Date: 9/14/13
* Time: 5:40 PM
*
* This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
* To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/.
'''
from collections import namedtuple
from branding import is_branded
DIRECT = 1
PPC = 2
SEO = 3
Channel = namedtuple('Channel', ['type', 'keywords', 'branded'])
def get_ppc_keywords(ppc_str):
"""
:param ppc_str:
:return: keywords
"""
try:
keywords = ppc_str.split('_')[2]
    except IndexError:
raise ValueError
return keywords
def get_seo_keywords(seo_query):
"""
    :param seo_query:
:return: keywords
"""
try:
keywords = seo_query.split(' :: ')[2]
    except IndexError:
raise ValueError
return keywords
def parse_source(source_str):
if (source_str[0:3] == 'ppc'):
channel_type = PPC
keywords = get_ppc_keywords(source_str)
channel_keywords = keywords
channel_branded = is_branded(keywords)
channel = Channel(channel_type, channel_keywords, channel_branded)
else:
if source_str[0:3] == 'seo':
channel_type = SEO
keywords = get_seo_keywords(source_str)
channel_keywords = keywords
channel_branded = is_branded(keywords)
channel = Channel(channel_type, channel_keywords, channel_branded)
else:
channel_type = DIRECT
channel_keywords = None
channel_branded = False
channel = Channel(channel_type, channel_keywords, channel_branded)
return channel
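# Illustration with invented source strings. The branded flag needs the external
# 'branding' module imported above, so only the keyword extraction helpers are shown.
if __name__ == '__main__':
    print(get_ppc_keywords('ppc_google_running shoes_campaign42'))  # 'running shoes'
    print(get_seo_keywords('seo :: google :: running shoes'))       # 'running shoes'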
|
mit
| -6,849,401,520,349,252,000
| 23.691176
| 110
| 0.61621
| false
| 3.570213
| false
| false
| false
|
leriomaggio/pycon_site
|
p3/templatetags/p3.py
|
1
|
21562
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
import mimetypes
import os
import os.path
import re
import random
import sys
import urllib
from collections import defaultdict
from datetime import datetime
from itertools import groupby
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from conference import dataaccess as cdataaccess
from conference import models as ConferenceModels
from conference.settings import STUFF_DIR, STUFF_URL
from assopy import models as amodels
from p3 import dataaccess
from p3 import forms as p3forms
from p3 import models
from fancy_tag import fancy_tag
mimetypes.init()
register = template.Library()
@register.inclusion_tag('p3/box_pycon_italia.html')
def box_pycon_italia():
return {}
@register.inclusion_tag('p3/box_newsletter.html', takes_context=True)
def box_newsletter(context):
return context
@register.inclusion_tag('p3/box_cal.html', takes_context = True)
def box_cal(context, limit=None):
deadlines = ConferenceModels.Deadline.objects.valid_news()
if limit:
deadlines = deadlines[:int(limit)]
return {
'deadlines': [ (d, d.content(context['LANGUAGE_CODE'])) for d in deadlines ]
}
@register.inclusion_tag('p3/render_cal.html', takes_context=True)
def render_cal(context):
return context
@register.inclusion_tag('p3/box_download.html', takes_context = True)
def box_download(context, fname, label=None):
if '..' in fname:
raise template.TemplateSyntaxError("file path cannot contains ..")
if fname.startswith('/'):
raise template.TemplateSyntaxError("file path cannot starts with /")
if label is None:
label = os.path.basename(fname)
try:
fpath = os.path.join(settings.STUFF_DIR, fname)
stat = os.stat(fpath)
except (AttributeError, OSError), e:
fsize = ftype = None
else:
fsize = stat.st_size
ftype = mimetypes.guess_type(fpath)[0]
return {
'url': context['STUFF_URL'] + fname,
'label': label,
'fsize': fsize,
'ftype': ftype,
}
@register.inclusion_tag('p3/box_didyouknow.html', takes_context = True)
def box_didyouknow(context):
try:
d = ConferenceModels.DidYouKnow.objects.filter(visible = True).order_by('?')[0]
except IndexError:
d = None
return {
'd': d,
'LANGUAGE_CODE': context.get('LANGUAGE_CODE'),
}
@register.inclusion_tag('p3/box_googlemaps.html', takes_context = True)
def box_googlemaps(context, what='', zoom=13):
what = ','.join([ "'%s'" % w for w in what.split(',') ])
return {
'rand': random.randint(0, sys.maxint - 1),
'what': what,
'zoom': zoom
}
@register.inclusion_tag('p3/box_talks_conference.html', takes_context = True)
def box_talks_conference(context, talks):
"""
    show past talks grouped by conference
"""
conf = defaultdict(list)
for t in talks:
conf[t.conference].append(t)
talks = []
for c in reversed(sorted(conf.keys())):
talks.append((c, conf[c]))
return { 'talks': talks }
@register.inclusion_tag('p3/box_latest_tweets.html', takes_context=True)
def box_latest_tweets(context):
ctx = Context(context)
ctx.update({
'screen_name': settings.P3_TWITTER_USER,
})
return ctx
@register.filter
def render_time(tweet, args=None):
time = tweet["timestamp"]
    time = datetime.fromtimestamp(time)
return time.strftime("%d-%m-%y @ %H:%M")
@register.filter
def check_map(page):
"""
    check whether the given page requires a map or not
"""
if page:
return '{% render_map' in page.expose_content()
return False
@register.inclusion_tag('p3/render_map.html', takes_context=True)
def render_map(context):
return {}
@register.inclusion_tag('p3/fragments/render_ticket.html', takes_context=True)
def render_ticket(context, ticket):
from p3 import forms
user = context['request'].user
if ticket.fare.ticket_type == 'conference':
try:
inst = ticket.p3_conference
except:
inst = None
form = forms.FormTicket(
instance=inst,
initial={
'ticket_name': ticket.name,
},
prefix='t%d' % (ticket.id,),
single_day=ticket.fare.code[2] == 'D',
)
if inst and inst.assigned_to:
blocked = inst.assigned_to != user.email
else:
blocked = False
elif ticket.fare.code in ('SIM01',):
try:
inst = ticket.p3_conference_sim
except:
inst = None
form = forms.FormTicketSIM(
instance=inst,
initial={
'ticket_name': ticket.name,
},
prefix='t%d' % (ticket.id,),
)
blocked = False
elif ticket.fare.code.startswith('H'):
# TicketRoom instances must exist, they're created by a listener
inst = ticket.p3_conference_room
form = forms.FormTicketRoom(
instance=inst,
initial={
'ticket_name': ticket.name,
},
prefix='t%d' % (ticket.id,),
)
blocked = False
else:
form = forms.FormTicketPartner(instance=ticket, prefix='t%d' % (ticket.id,))
blocked = False
ctx = Context(context)
ctx.update({
'ticket': ticket,
'form': form,
'user': user,
'blocked': blocked,
})
return ctx
@register.assignment_tag(takes_context=True)
def fares_available(context, fare_type, sort=None):
"""
    Return the list of fares currently active for the
    specified fare type.
"""
assert fare_type in ('all', 'conference', 'goodies', 'partner', 'hotel-room', 'hotel-room-sharing', 'other')
if not settings.P3_FARES_ENABLED(context['user']):
return []
fares_list = filter(lambda f: f['valid'], cdataaccess.fares(settings.CONFERENCE_CONFERENCE))
if fare_type == 'conference':
fares = [ f for f in fares_list if f['code'][0] == 'T' and f['ticket_type'] == 'conference' ]
elif fare_type == 'hotel-room-sharing':
fares = [ f for f in fares_list if f['code'].startswith('HB') ]
elif fare_type == 'hotel-room':
fares = [ f for f in fares_list if f['code'].startswith('HR') ]
elif fare_type == 'other':
fares = [ f for f in fares_list if f['ticket_type'] in ('other', 'event') and f['code'][0] != 'H' ]
elif fare_type == 'partner':
fares = [ f for f in fares_list if f['ticket_type'] in 'partner' ]
elif fare_type == 'all':
fares = fares_list
if sort == "price":
fares.sort(key=lambda x: x['price'])
return fares
@fancy_tag(register, takes_context=True)
def render_cart_rows(context, fare_type, form):
assert fare_type in ('conference', 'goodies', 'partner', 'hotel-room', 'hotel-room-sharing', 'other')
ctx = Context(context)
request = ctx['request']
try:
company = request.user.assopy_user.account_type == 'c'
except AttributeError:
# anonymous user or without an assopy profile (impossible!)
company = False
ctx.update({
'form': form,
'company': company,
})
fares_list = filter(lambda f: f['valid'], cdataaccess.fares(settings.CONFERENCE_CONFERENCE))
if fare_type == 'conference':
tpl = 'p3/fragments/render_cart_conference_ticket_row.html'
# rendering "conference" tickets is a bit complex; each row in
# the cart corresponds to multiple "fare" (student, private, copany)
#
# The prices must be sorted on time + ticket type + owner
# early
# full [Student, Private, Company]
# lite (standard) [Student, Private, Company]
# daily [Student, Private, Company]
# regular (late)
# ...
# on desk
# ...
#
# The correct time ordering is guaranteed implicitly by
# excluding expired fares (it's not permitted to have overlaps
# of validity periods).
fares = dict((f['code'][2:], f) for f in fares_list if f['code'][0] == 'T')
rows = []
for t in ('S', 'L', 'D'):
# To simplify the template fares are packed in triplets:
# student, private, company.
#
# Each raw is a tuple with three elements:
# 1. Fare
# 2. FormField
# 3. Boolean flag telling if the price can be applied to the user
row = []
for k in ('S', 'P', 'C'):
try:
f = fares[t+k]
except KeyError:
row.append((None, None, None))
else:
# The price is valid if the time test is passed and if the
# account type is compatible
valid = not (company ^ (f['code'][-1] == 'C'))
row.append((f, form.__getitem__(f['code']), valid))
rows.append(row)
ctx['rows'] = rows
elif fare_type == 'hotel-room-sharing':
tpl = 'p3/fragments/render_cart_hotel_ticket_row.html'
ctx['field'] = form['bed_reservations']
ctx['field'].field.widget._errors = ctx['field'].errors
elif fare_type == 'hotel-room':
tpl = 'p3/fragments/render_cart_hotel_ticket_row.html'
ctx['field'] = form['room_reservations']
ctx['field'].field.widget._errors = ctx['field'].errors
elif fare_type == 'other':
tpl = 'p3/fragments/render_cart_og_ticket_row.html'
fares = defaultdict(dict)
order = ('p', 'c')
columns = set()
for f in fares_list:
if f['ticket_type'] in ('other', 'event') and f['code'][0] != 'H':
columns.add(f['recipient_type'])
fares[f['name']][f['recipient_type']] = f
ctx['fares'] = fares.values()
ctx['recipient_types'] = sorted(columns, key=lambda v: order.index(v))
elif fare_type == 'partner':
tpl = 'p3/fragments/render_cart_partner_ticket_row.html'
ctx['fares'] = [ f for f in fares_list if f['ticket_type'] in 'partner' ]
return render_to_string(tpl, ctx)
@register.inclusion_tag('p3/box_image_gallery.html', takes_context=True)
def box_image_gallery(context):
images = []
for f in os.listdir(STUFF_DIR):
images.append('%s%s' % (STUFF_URL, f))
context.update({
'images': images,
})
return context
@fancy_tag(register, takes_context=True)
def render_partner_program(context, conference=None):
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
from conference import dataaccess
from conference.templatetags.conference import fare_blob
fares = [ x for x in dataaccess.fares(conference) if x['ticket_type'] == 'partner' and x['valid'] ]
fares.sort(key=lambda x: (slugify(x['name']), fare_blob(x, 'date')))
ctx = Context(context)
ctx.update({
'fares': [ (k, list(v)) for k, v in groupby(fares, key=lambda x: slugify(x['name'])) ],
})
return render_to_string('p3/fragments/render_partner_program.html', ctx)
@fancy_tag(register, takes_context=True)
def event_partner_program(context, event):
fare_id = re.search(r'f(\d+)', event.track)
if fare_id is None:
return ''
from conference.templatetags.conference import _request_cache
c = _request_cache(context['request'], 'fares')
if not c:
for f in ConferenceModels.Fare.objects.all():
c[str(f.id)] = f
fare = c[fare_id.group(1)]
return mark_safe('<a href="/partner-program/#%s">%s</a>' % (slugify(fare.name), event.custom,))
@register.filter
def schedule_to_be_splitted(s):
tracks = ConferenceModels.Track.objects.by_schedule(s)
s = []
for t in tracks:
if t.track.startswith('partner') or t.track.startswith('sprint'):
s.append(t)
return len(tracks) != len(s)
@register.filter
def tickets_url(user):
"""
ritorna la url più diretta per mandare l'utente sulla sua pagina ticket
"""
if user.assopy_user.token:
u = reverse('p3-user', kwargs={'token': user.assopy_user.token})
else:
u = reverse('p3-tickets')
return settings.DEFAULT_URL_PREFIX + u
@register.filter
def ticket_user(ticket):
try:
p3c = ticket.p3_conference
except models.TicketConference.DoesNotExist:
p3c = None
if p3c and p3c.assigned_to:
from assopy.models import User
return User.objects.get(user__email=p3c.assigned_to)
else:
return ticket.orderitem.order.user
@register.filter
def com_com_registration(user):
url = 'https://hotspot.com-com.it/signup/?'
name = user.name()
try:
fn, ln = name.split(' ', 1)
except ValueError:
fn = name
ln = ''
params = {
'autofill': 'yes',
'firstname': fn,
'lastname': ln,
'email': user.user.email,
}
if user.country:
params['nationality'] = user.country.pk
if user.phone and user.phone.startswith('+39'):
params['ita_mobile'] = user.phone
params['username'] = name.lower().replace(' ', '').replace('.', '')[:12]
for k, v in params.items():
params[k] = v.encode('utf-8')
return url + urllib.urlencode(params)
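    # Illustrative result (made-up values; parameter order may differ):
    # https://hotspot.com-com.it/signup/?autofill=yes&firstname=Jane&lastname=Doe&email=jane%40example.com&username=janedoe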
@register.inclusion_tag('p3/box_next_events.html', takes_context=True)
def box_next_events(context):
from conference.templatetags import conference as ctags
t = datetime.now()
try:
sch = ConferenceModels.Schedule.objects.get(date=t.date())
except ConferenceModels.Schedule.DoesNotExist:
current = next = {}
else:
current = ctags.current_events(context, t)
next = ctags.next_events(context, t)
tracks = dict(
(x, None)
for x in ConferenceModels.Track.objects.by_schedule(sch)
if x.outdoor == False
)
for track in tracks:
c = current.get(track)
if c:
if hasattr(c, 'evt'):
c = c.evt.ref
else:
c = c.ref
n = next.get(track)
if n:
n_time = n.time
if hasattr(n, 'evt'):
n = n.evt.ref
else:
n = n.ref
else:
n_time = None
tracks[track] = {
'current': c,
'next': (n, n_time),
}
events = sorted(tracks.items(), key=lambda x: x[0].order)
ctx = Context(context)
ctx.update({
'events': events,
})
return ctx
@fancy_tag(register)
def p3_profile_data(uid):
return dataaccess.profile_data(uid)
@fancy_tag(register)
def p3_profiles_data(uids):
return dataaccess.profiles_data(uids)
@fancy_tag(register)
def p3_talk_data(tid):
return dataaccess.talk_data(tid)
@fancy_tag(register, takes_context=True)
def get_form(context, name, bound="auto", bound_field=None):
if '.' in name:
from conference.utils import dotted_import
fc = dotted_import(name)
else:
fc = getattr(p3forms, name)
request = context['request']
if bound:
if bound == 'auto':
bound = request.method
if bound == 'GET':
data = request.GET
elif bound == 'POST':
data = request.POST
else:
from django.db.models import Model
if isinstance(bound, Model):
data = {}
for name in fc.base_fields:
data[name] = getattr(bound, name)
else:
data = bound
if bound_field and bound_field not in data:
data = None
else:
data = None
form = fc(data=data)
if data:
form.is_valid()
return form
@fancy_tag(register)
def pending_email_change(user):
try:
t = amodels.Token.objects.get(ctype='e', user=user)
except amodels.Token.DoesNotExist:
return None
return t.payload
@fancy_tag(register)
def admin_ticketroom_overall_status():
status = models.TicketRoom.objects.overall_status()
labels = dict(models.HOTELROOM_ROOM_TYPE)
days = sorted(status.keys())
rooms = {}
for day in days:
dst = status[day]
for room_type, dst in status[day].items():
try:
r = rooms[room_type]
except KeyError:
r = rooms[room_type] = {
'type': room_type,
'label': labels.get(room_type, room_type),
'days': [],
}
r['days'].append(dst)
return {
'days': days,
'rooms': rooms.values(),
}
@fancy_tag(register)
def warmup_conference_cache(conference=None):
"""
"""
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
qs = ConferenceModels.TalkSpeaker.objects\
.filter(talk__conference=conference)\
.values_list('talk', 'speaker')
talks = set()
speakers = set()
for row in qs:
talks.add(row[0])
speakers.add(row[1])
return {
'speakers': dict([ (x['id'], x) for x in dataaccess.profiles_data(speakers) ]),
'talks': dict([ (x['id'], x) for x in cdataaccess.talks_data(talks) ]),
}
@register.filter
def frozen_reason(ticket):
if not ticket.frozen:
return ''
if amodels.RefundOrderItem.objects.filter(orderitem=ticket.orderitem).exists():
return 'refund pending'
else:
return ''
@fancy_tag(register, takes_context=True)
def all_user_tickets(context, uid=None, conference=None, status="complete", fare_type="conference"):
if uid is None:
uid = context['request'].user.id
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
tickets = dataaccess.all_user_tickets(uid, conference)
if status == 'complete':
tickets = filter(lambda x: x[3], tickets)
elif status == 'incomplete':
tickets = filter(lambda x: not x[3], tickets)
if fare_type != "all":
tickets = filter(lambda x: x[1] == fare_type, tickets)
return tickets
@fancy_tag(register)
def p3_tags():
return dataaccess.tags()
@fancy_tag(register)
def p3_tags_for_talks():
conference = settings.CONFERENCE_CONFERENCE
return dataaccess.tags_for_conference_talks(conference=conference)
@fancy_tag(register, takes_context=True)
def render_profile_box(context, profile, conference=None, user_message="auto"):
if conference is None:
conference = settings.CONFERENCE_CONFERENCE
if isinstance(profile, int):
profile = dataaccess.profile_data(profile)
ctx = Context(context)
ctx.update({
'profile': profile,
'conference': conference,
'user_message': user_message if user_message in ('auto', 'always', 'none') else 'auto',
})
return render_to_string('p3/fragments/render_profile_box.html', ctx)
@register.inclusion_tag('p3/fragments/archive.html', takes_context=True)
def render_archive(context, conference):
ctx = Context(context)
def match(e, exclude_tags=set(('partner0', 'partner1', 'sprint1', 'sprint2', 'sprint3'))):
if e['tags'] & exclude_tags:
return False
if not e['talk']:
return False
return True
events = { x['id']:x for x in filter(match, cdataaccess.events(conf=conference)) }
talks = {}
for e in events.values():
t = e['talk']
if t['id'] in talks:
continue
t['dates'] = sorted([ (events[x]['time'], events[x]['talk']['video_url']) for x in t['events_id'] ])
talks[t['id']] = t
ctx.update({
'conference': conference,
'talks': sorted(talks.values(), key=lambda x: x['title']),
})
return ctx
@register.filter
def timetable_remove_first(timetable, tag):
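    """
    Slice away the leading time slots whose events all carry ``tag``; the
    returned timetable starts at the first slot containing at least one
    event without that tag.
    """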
if not tag:
return timetable
start = None
for time, events in timetable.iterOnTimes():
stop = False
for e in events:
if tag not in e['tags']:
stop = True
break
start = time.time()
if stop:
break
return timetable.slice(start=start)
@register.assignment_tag
def p3_voting_data(conference):
from conference.templatetags.conference import voting_data
from conference.utils import voting_results
groups = defaultdict(list)
results = voting_results()
if results is not None:
talk_ids = [ x[0] for x in results ]
sub_community = dict(
models.P3Talk.objects\
.filter(talk__conference=conference)\
.values_list('talk', 'sub_community'))
for tid, type, language in results:
community = sub_community.get(tid, '')
groups[(type, community)].append(tid)
results = voting_data(conference)
results['groups'] = dict(groups)
return results
@fancy_tag(register, takes_context=True)
def get_latest_conf_deadline(context, limit=None, not_expired=True):
try:
conf = ConferenceModels.Conference.objects.latest('code')
return [conf.name, conf.code, conf.conference_start, conf.conference_end, datetime.today().date()]
    except ConferenceModels.Conference.DoesNotExist:
return []
|
bsd-2-clause
| -4,565,564,612,632,686,600
| 31.471386
| 112
| 0.596215
| false
| 3.569111
| false
| false
| false
|
Semprini/cbe-retail
|
retail/store/models.py
|
1
|
2047
|
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from cbe.location.models import Location, GeographicArea
from cbe.party.models import Organisation, PartyRole
from cbe.physical_object.models import Structure
class Store(PartyRole):
enterprise_id = models.IntegerField(unique=True)
code = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
identifiers = GenericRelation('human_resources.Identification', object_id_field="party_role_object_id", content_type_field='party_role_content_type', related_query_name='store')
store_type = models.CharField( max_length=100, null=True, blank=True, choices=(('mitre 10','mitre 10'),('mega','mega'),('hammer','hammer'),('trade','trade')) )
store_class = models.CharField( max_length=100, null=True, blank=True, choices=(('mega-1','mega-1'),('mega-2','mega-2'),('mega-3','mega-3'),('mega-r','mega-r'),('mitre10-small','mitre10-small'),('mitre10-medium','mitre10-medium'),('mitre10-large','mitre10-large'),('trade','trade')) )
opening_date = models.DateField(blank=True, null=True)
location = models.ForeignKey(Location, on_delete=models.CASCADE, blank=True, null=True)
trade_area = models.ForeignKey(GeographicArea, on_delete=models.CASCADE, related_name='store_trade_areas', blank=True, null=True)
retail_area = models.ForeignKey(GeographicArea, on_delete=models.CASCADE, related_name='store_retail_areas', blank=True, null=True)
national_area = models.ForeignKey(GeographicArea, on_delete=models.CASCADE, related_name='store_national_areas', blank=True, null=True)
buildings = models.ManyToManyField(Structure, related_name='store')
class Meta:
ordering = ['id']
def __str__(self):
return "%s" %(self.name )
def save(self, *args, **kwargs):
if self.name == "":
self.name = "Store"
super(Store, self).save(*args, **kwargs)
|
apache-2.0
| -8,812,676,983,199,888,000
| 52.894737
| 288
| 0.682462
| false
| 3.610229
| false
| false
| false
|
ytec/instaforex-web
|
app/open/migrations/0001_initial.py
|
1
|
1404
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0016_auto_20160608_1535'),
]
operations = [
migrations.CreateModel(
name='form',
fields=[
('cmsplugin_ptr', models.OneToOneField(primary_key=True, serialize=False, auto_created=True, related_name='open_form', parent_link=True, to='cms.CMSPlugin')),
('name', models.CharField(max_length=25, default='Demo')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='OpenAccountAnonymous',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.CreateModel(
name='OpenAccountDemo',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.CreateModel(
name='OpenAccountReal',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
]
|
gpl-3.0
| 2,533,682,071,263,246,300
| 31.651163
| 174
| 0.533476
| false
| 4.471338
| false
| false
| false
|
slideclick/toys
|
Simple/oMachine2.3.1-1.py
|
1
|
3469
|
# -* - coding: UTF-8 -* -
## Virtual Machine 2.3.1
## Small-step semantics -- expressions
## python 3.4
class Number(object):
""" 数值符号类
"""
def __init__(self, value):
self.value = value
def reducible(self):
return False
def to_s(self):
return str(self.value)
class Boolean(object):
""" 布尔值符号类型
"""
def __init__(self, value):
self.value = value
def reducible(self):
return False
def to_s(self):
return str(self.value)
class Add(object):
""" 加法符号类
"""
def __init__(self, left, right):
self.left = left
self.right = right
def reducible(self):
return True
def reduce(self, environment):
if self.left.reducible():
return Add(self.left.reduce(environment), self.right)
elif self.right.reducible():
return Add(self.left, self.right.reduce(environment))
else:
return Number(self.left.value + self.right.value)
def to_s(self):
return self.left.to_s() + ' + ' + self.right.to_s()
class Multiply(object):
""" 乘法符号类
"""
def __init__(self, left, right):
self.left = left
self.right = right
def reducible(self):
return True
def reduce(self, environment):
if self.left.reducible():
return Multiply(self.left.reduce(environment), self.right)
elif self.right.reducible():
return Multiply(self.left, self.right.reduce(environment))
else:
return Number(self.left.value * self.right.value)
def to_s(self):
return self.left.to_s() + ' * ' + self.right.to_s()
class LessThan(object):
""" 小于符号类
"""
def __init__(self, left, right):
self.left = left
self.right = right
def reducible(self):
return True
def reduce(self, environment):
if self.left.reducible():
return LessThan(self.left.reduce(environment), self.right)
elif self.right.reducible():
return LessThan(self.left, self.right.reduce(environment))
else:
return Boolean(self.left.value < self.right.value)
def to_s(self):
return self.left.to_s() + ' < ' + self.right.to_s()
class Variable(object):
""" 变量符号类
"""
def __init__(self, name):
self.name = name
def reducible(self):
return True
def reduce(self, environment):
return environment[self.name]
def to_s(self):
return str(self.name)
class Machine(object):
""" 虚拟机
"""
def __init__(self, expression, environment):
self.expression = expression
self.environment = environment
def step(self):
self.expression = self.expression.reduce(self.environment)
def run(self):
while self.expression.reducible():
print(self.expression.to_s())
self.step()
print(self.expression.value)
## test
## Run expressions in the virtual machine
##1 * 2 + 3 * 4 = 14
Machine(Add(Multiply(Number(1), Number(2)),
Multiply(Number(3), Number(4))),
{}
).run()
print('')
##5 < 2 + 2
Machine(
LessThan(Number(5), Add(Number(2), Number(2))),
{}
).run()
print('')
##x = 3; y = 4; x + y = 7
Machine(
Add(Variable('x'), Variable('y')),
{'x':Number(3), 'y':Number(4)}
).run()
|
gpl-2.0
| -6,269,499,450,322,178,000
| 20.433121
| 70
| 0.55156
| false
| 3.273346
| false
| false
| false
|
jszopi/repESP
|
repESP/respin_format.py
|
1
|
19161
|
"""Parsing and writing ``resp`` program instruction file format ("respin")"""
from dataclasses import dataclass, asdict
from fortranformat import FortranRecordWriter as FW
from itertools import zip_longest
import io
import math
import re
import sys
from typing import Dict, List, Optional, TextIO, Tuple, Type, TypeVar, Union
from repESP.exceptions import InputFormatError
from repESP.equivalence import Equivalence
from repESP.types import Atom, Molecule
from repESP._util import get_line, zip_exact
IvaryT = TypeVar('IvaryT', bound='Respin.Ivary')
@dataclass
class Respin:
"""Dataclass describing the ``resp`` program instructions
Note that the functionality is currently limited to a single molecule and
a single structure.
Parameters
----------
title : str
The title of the calculation to be performed.
cntrl : Cntrl
Dataclass representing the "cntrl" section of the input.
subtitle : str
Subtitle describing the considered molecular structure.
charge : int
The total charge of the molecule.
molecule : Molecule[Atom]
The molecule which charges are being fitted. Only atom identities are required.
ivary : Ivary
The "ivary" values for fitting the considered structure. These determine
how the charge on each atom is allowed to vary during the fitting.
Attributes
----------
title
See initialization parameter
cntrl
See initialization parameter
subtitle
See initialization parameter
charge
See initialization parameter
molecule
See initialization parameter
ivary
See initialization parameter
"""
_ValueType = TypeVar("_ValueType", int, float, str)
@staticmethod
def _check_value(
attr_name: str,
attr_value: _ValueType,
allowed_values: List[_ValueType]
) -> None:
if attr_value not in allowed_values:
raise ValueError(
f"Invalid value for `{attr_name}`: {attr_value}."
)
@dataclass
class Cntrl:
"""Dataclass describing the "cntrl" section of a "respin" file
See ``resp`` program documentation for more details.
Parameters
----------
inopt : int, optional
If equal to 1, ``resp`` will cycle through different "qwt" values
from the file specified with the ``-w`` option. Defaults to 0.
ioutopt : int, optional
If equal to 1, ``resp`` will write restart info of new ESP field
to the file specified with the ``-s`` option. Defaults to 0.
iqopt : int, optional
Controls setting initial charges. If equal to 1 (default), all
initial charges will be set to zero. If equal to 2, initial charges
are read from the file specified with the ``-q`` option. If equal
to 3, charges are read as with the previous option and will
additionally be averaged according to "ivary" values (normally not
used).
ihfree : int, optional
If equal to 0, the charge magnitude restraint is applied to all
charges. If equal to 1 (default), the restraint does not apply to
hydrogen atoms.
irstrnt : int, optional
Controls the type of charge magnitude restraint. If equal to 0,
harmonic restraints are used (old-style). If equal to 1 (default),
hyperbolic restraints are used. If equal to 2, no charge fitting
is carried out and only analysis of input charges is performed.
qwt : float, optional
The weight of the charge magnitude restraint to be used during
the fitting. Defaults to 0.0 (no charge magnitude restraint).
.. warning::
The default used here is different from the default used by ``resp``.
                Note that the ``resp`` documentation specifies that it uses the
Amber force field values by default. However, it is not clear how
it can determine the fitting stage. Thus, to remove the ambiguity,
this dataclass assumes a weight of zero by default.
.. note::
Amber force fields use values of 0.0005 and 0.001 for
stages 1 and 2, respectively. The Glycam force field is derived with
one stage fitting with a value of 0.01.
Attributes
----------
inopt
See initialization parameter
ioutopt
See initialization parameter
iqopt
See initialization parameter
ihfree
See initialization parameter
irstrnt
See initialization parameter
qwt
See initialization parameter
"""
inopt: int = 0
ioutopt: int = 0
iqopt: int = 1
ihfree: int = 1
irstrnt: int = 1
qwt: float = 0
@property
def nmol(self) -> int:
"""Number of structures in a multiple structure fit.
With the current implementation this will always be equal to 1.
"""
return 1
def __post_init__(self) -> None:
Respin._check_value("inopt", self.inopt, [0, 1])
Respin._check_value("ioutopt", self.ioutopt, [0, 1])
Respin._check_value("iqopt", self.iqopt, [1, 2, 3])
Respin._check_value("ihfree", self.ihfree, [0, 1])
Respin._check_value("irstrnt", self.irstrnt, [0, 1, 2])
if self.qwt < 0:
raise ValueError(f"Invalid value for `qwt`: {self.qwt}.")
@dataclass
class Ivary:
"""Dataclass representing per-atom fitting instructions for ``resp``
The fitting instructions are represented as a list of values stored in
the `values` attribute. The length of this list must be the same as the
number of atoms in the molecule it describes. Consecutive values refer
to consecutive atoms of the molecule.
The values determine how the charge on the atom can vary during the
fitting and the allowed values are:
* -1, meaning that the atom's charge is "frozen" at the initial value
* 0, meaning that this atom will be varied freely
* Larger than zero, representing the 1-based index of the atom in the
molecule to which this atom is to be equivalenced.
Example
-------
Consider fitting RESP charges in a molecule of methylamine:
>>> methylamine = Molecule([Atom(atomic_number) for atomic_number in [6, 1, 1, 1, 7, 1, 1]])
Fitting RESP charges consists of two stages. The ivary section for the
second stage of the fitting for the methylamine molecule should be as
follows:
>>> ivary = Respin.Ivary([0, 0, 2, 2, -1, -1, -1])
The carbon atom is free to vary during the fitting. The first of the methyl
hydrogens is equivalenced to the remaining two but they haven't been
specified yet, so it also has a value of 0. These two hydrogen atoms
are equivalenced to the first one, and thus are assigned its one-based
index in the molecule, i.e. 2 (meaning "equivalenced to the second atom
of the molecule"). The nitrogen atom and the two hydrogen atoms attached
to it are frozen during the second stage of the fitting and are thus
assigned values of -1.
Parameters
----------
values : List[int]
The per-atom instructions for the ``resp`` program.
Attributes
----------
values
See initialization parameter
"""
values: List[int]
def __post_init__(self) -> None:
for i, elem in enumerate(self.values):
if elem < -1 or elem > len(self.values):
raise ValueError(
f"Value number {i} passed as `ivary` with value {elem}, "
f"which is either lower than 0 or outside the list length."
)
def describe(self, molecule: Optional[Molecule[Atom]]=None) -> str:
"""Verbosely report the "ivary" actions
Example
-------
>>> print(ivary.describe(methylamine))
Atom (C) number 1
Atom (H) number 2
Atom (H) number 3, equivalenced to atom 2
Atom (H) number 4, equivalenced to atom 2
Atom (N) number 5, frozen
Atom (H) number 6, frozen
Atom (H) number 7, frozen
Parameters
----------
molecule : Optional[Molecule[Atom]], optional
The molecule to which the ivary information refers. This
argument is optional and defaults to None. If it is provided,
atom identities will be included in the output.
Raises
------
ValueError
Raised when the number of atoms in the molecule does not match
the length of the list of values in this object.
Returns
-------
str
A verbose description of the "ivary" instructions.
"""
if molecule is not None and len(molecule.atoms) != len(self.values):
raise ValueError(
f"The number of atoms ({len(molecule.atoms)} is not the same "
f"as the number of ivary values ({len(self.values)}."
)
zipped = zip_longest(self.values, molecule.atoms if molecule is not None else [])
f = io.StringIO()
for i, (ivary, atom) in enumerate(zipped):
atomic_number = atom.symbol if molecule is not None else None
id_str = f" ({atomic_number})" if atomic_number is not None else ""
if ivary < 0:
# TODO: This could also report at what value if charges are provided
ivary_str = ", frozen"
elif ivary > 0:
ivary_str = f", equivalenced to atom {ivary}"
else:
ivary_str = ""
print(f"Atom{id_str} number {i+1}{ivary_str}", file=f)
return f.getvalue()
@classmethod
def from_equivalence(cls: Type[IvaryT], equivalence: Equivalence) -> IvaryT:
"""Alternative initialization from equivalence information
.. note:
The resulting ivary instructions will correspond to fitting
with equivalent atoms assigned identical charges. This may not
be the type of fitting that you want to perform.
Parameters
----------
equivalence : Equivalence
Information about chemical equivalence of atoms in a molecule.
"""
return cls([0 if val is None else val + 1 for val in equivalence.values])
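            # e.g. Equivalence([None, None, 1, 1, 1]) -> Ivary([0, 0, 2, 2, 2])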
title: str
cntrl: Cntrl
subtitle: str
charge: int
molecule: Molecule[Atom]
ivary: Ivary
@property
def wtmol(self) -> float:
"""Relative weight of the structure in a multistructure fitting.
A value of 1.0 is always returned in the current implementation.
"""
return 1.0
@property
def iuniq(self) -> int:
"""The number of atoms in the fitted structure"""
return len(self.molecule.atoms)
def __post_init__(self) -> None:
if len(self.molecule.atoms) != len(self.ivary.values):
raise ValueError(
f"Number of atoms ({len(self.molecule.atoms)}) does not match number "
f"of ivary values ({len(self.ivary.values)})."
)
def _get_equivalence_from_ivary(ivary: Respin.Ivary) -> Equivalence:
"""Get atom equivalence information from an `Respin.Ivary` object
This function is private as users probably mean to use the
`get_equivalence` function instead.
`Ivary` objects are specific to ``resp`` program input and thus may not
provide information about atom equivalence. The "respin" file may have been
generated to perform any custom fitting with ``resp``. Only use this
function when you're certain that the "respin" file contains all the
equivalence information that you need.
"""
return Equivalence([
None if ivary_value == 0 else ivary_value - 1
for ivary_value in ivary.values
])
def get_equivalence_from_two_stage_resp_ivary(ivary1: Respin.Ivary, ivary2: Respin.Ivary) -> Equivalence:
"""Get atom equivalence from input files for two 2-stage RESP
Derive atom equivalence based on the data in two "respin" files
(represented by the `Respin` objects) created for the purpose of two-stage
RESP fitting with the ``resp`` program. The files can be generated with the
``respgen`` program with the following commands::
respgen -i methane.ac -o methane.respin1 -f resp1
respgen -i methane.ac -o methane.respin2 -f resp2
.. warning::
The correctness of this function relies on:
1. Antechamber and ``respgen`` correctly recognizing the symmetry
relations between atoms. Fast-exchanging atoms may not be identified.
2. The author's understanding of how ``respgen`` generates the "respin"
files for two-stage RESP fitting.
Thus it is advised to always check that the result of this function
agrees with the domain knowledge about the studied molecule.
"""
# The equivalence logic is explained somewhat inconsistently in the RESP
# papers but I've additionally re-engineered the ``resp`` program's logic
# to be sure that reading both the ``respin`` files will give the desired
# behaviour. In fact, it's pretty simple. In the first stage atoms of the
# methyl and methylene groups are free, while all the others are
# equivalenced. In the second stage the former are equivalenced, while all
# the others are frozen.
return _get_equivalence_from_ivary(Respin.Ivary([
max(ivary1_value, ivary2_value)
for ivary1_value, ivary2_value in zip_exact(ivary1.values, ivary2.values)
]))
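# Illustrative example with hypothetical ivary values for methane (CH4), roughly
# what the ``respgen`` commands quoted above could produce:
#
#     ivary1 = Respin.Ivary([0, 0, 0, 0, 0])  # stage 1: all atoms fitted freely
#     ivary2 = Respin.Ivary([0, 0, 2, 2, 2])  # stage 2: H3-H5 equivalenced to H2
#     get_equivalence_from_two_stage_resp_ivary(ivary1, ivary2)
#     # -> Equivalence([None, None, 1, 1, 1])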
def _parse_cntrl(f: TextIO) -> Respin.Cntrl:
    line_re = re.compile(r" (\w+) =\s+([0-9.]+)")
kwargs: Dict[str, Union[int, float]] = {}
for line in f:
if line.rstrip('\n') == " &end":
break
if line.rstrip('\n') == "":
continue
line_match = line_re.match(line)
if line_match is None:
raise InputFormatError(
f"Failed parsing cntrl section of respin file:\n{line}"
)
key = line_match.group(1)
value = line_match.group(2)
kwargs[key] = float(value) if key == "qwt" else int(value)
# nmol is not a parameter of Cntrl.__init__ and must be equal to 1.
nmol = kwargs.pop("nmol", None)
if nmol is not None and nmol != 1:
raise InputFormatError("Parsing multiple structures is not supported")
return Respin.Cntrl(**kwargs) # type: ignore # (not sure why not recognized)
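# For reference, the "cntrl" section parsed above looks roughly like the text
# emitted by `_write_cntrl` below (values are illustrative):
#
#      &cntrl
#
#      inopt = 0,
#      qwt = 0.00050,
#
#      &end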
def parse_respin(f: TextIO) -> Respin:
"""Parse a file in the "respin" format (input format of ``resp``)
Note that only files describing a single structure fit are currently supported.
Parameters
----------
f : TextIO
File object opened in read mode containing the "respin" file.
Raises
------
InputFormatError
Raised when the file does not follow the expected format.
Returns
-------
Respin
Object representing the fitting instructions for the ``resp`` program.
"""
title = get_line(f)
for line in f:
if line == " &cntrl\n":
break
cntrl = _parse_cntrl(f)
wtmol = get_line(f).strip()
if not math.isclose(float(wtmol), 1.0, rel_tol=0, abs_tol=1e-6):
raise InputFormatError(
f"Encountered value of `wtmol` different from 1.0 ({wtmol}) but "
f"parsing is supported only for single-structure respin files."
)
subtitle = get_line(f)
charge_and_iuniq = get_line(f)
if len(charge_and_iuniq.split()) != 2:
raise InputFormatError(
f"Expected two ints for the line specifying charge and iuniq, found:\n{charge_and_iuniq}"
)
charge = int(charge_and_iuniq.split()[0])
iuniq = int(charge_and_iuniq.split()[1])
atoms: List[Atom] = []
ivary = Respin.Ivary([])
for line in f:
if line.rstrip('\n') == "":
break
if len(line.split()) != 2:
raise InputFormatError(
f"Expected two ints for the line specifying atom and ivary, found:\n{line}"
)
atoms.append(Atom(int(line.split()[0])))
ivary_value = int(line.split()[1])
# `respgen` uses a value of -99 but internally we use -1 as per resp spec.
ivary.values.append(ivary_value if ivary_value != -99 else -1)
if len(atoms) != iuniq:
raise InputFormatError(
f"The value of `iuniq` ({iuniq}) is different from the number of"
f"atoms in the described molecule ({len(atoms)})."
)
return Respin(
title,
cntrl,
subtitle,
charge,
Molecule(atoms),
ivary
)
def _write_cntrl(f: TextIO, cntrl: Respin.Cntrl, skip_defaults: bool) -> None:
default_cntrl: Dict[str, Union[int, float]] = asdict(Respin.Cntrl())
default_cntrl["nmol"] = 1
dict_: Dict[str, Union[int, float]] = asdict(cntrl)
dict_["nmol"] = cntrl.nmol
print(" &cntrl\n", file=f)
for key, value in dict_.items():
if key == "qwt":
print(" {} = {:.5f},".format(key, value), file=f)
else:
if not skip_defaults or value != default_cntrl[key]:
print(" {} = {},".format(key, value), file=f)
print("\n &end", file=f)
def write_respin(f: TextIO, respin: Respin, skip_cntrl_defaults: bool=True) -> None:
"""Write a "respin" file described by the given input data
Parameters
----------
f : TextIO
The file object to which the instructions are to be saved. The file
must be opened for writing.
respin : Respin
The dataclass representing all the instructions needed by the ``resp``
program.
skip_cntrl_defaults : bool, optional
When this option is set to True (default), fitting options in the "cntrl"
section with default values will not be written to the file.
"""
print(respin.title, file=f)
print(file=f)
_write_cntrl(f, respin.cntrl, skip_cntrl_defaults)
print(FW("F7.1").write([respin.wtmol]), file=f)
print(respin.subtitle, file=f)
print(FW("2I5").write([respin.charge, respin.iuniq]), file=f)
for atom, ivary in zip(respin.molecule.atoms, respin.ivary.values):
print(FW("2I5").write([atom.atomic_number, ivary]), file=f)
# According to the spec, a blank line is only for multi-structures but
# `resp` fails without it.
print(file=f)
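# A minimal usage sketch (file names are illustrative, not part of this module):
#
#     with open("methane.respin1") as f:
#         respin = parse_respin(f)
#     respin.cntrl.qwt = 0.0005
#     with open("methane_stage1.respin", "w") as f:
#         write_respin(f, respin)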
|
gpl-3.0
| 4,448,487,963,937,923,600
| 35.358634
| 105
| 0.604405
| false
| 4.170875
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_12_01/aio/operations/_route_tables_operations.py
|
1
|
23236
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_12_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2016_12_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_12_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
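    # Rough usage sketch (illustrative only; the credential class, resource names
    # and the `route_tables` attribute on the management client are assumptions,
    # not something defined in this module):
    #
    #     from azure.identity.aio import DefaultAzureCredential
    #     from azure.mgmt.network.aio import NetworkManagementClient
    #
    #     async def example():
    #         async with NetworkManagementClient(DefaultAzureCredential(),
    #                                            "<subscription-id>") as client:
    #             poller = await client.route_tables.begin_create_or_update(
    #                 "my-resource-group", "my-route-table", {"location": "westeurope"})
    #             print(await poller.result())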
|
mit
| -8,438,199,354,132,655,000
| 47.712788
| 191
| 0.638879
| false
| 4.319762
| true
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/cosmos/azure-cosmos/test/test_diagnostics.py
|
1
|
1357
|
import unittest
import pytest
import azure.cosmos.diagnostics as m
_common = {
'x-ms-activity-id',
'x-ms-session-token',
'x-ms-item-count',
'x-ms-request-quota',
'x-ms-resource-usage',
'x-ms-retry-after-ms',
}
_headers = dict(zip(_common, _common))
_headers['other'] = 'other'
class BaseUnitTests(unittest.TestCase):
def test_init(self):
rh = m.RecordDiagnostics()
assert rh.headers == {}
def test_headers(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
assert rh.headers == _headers
assert rh.headers is not _headers
def test_headers_case(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
rh_headers = rh.headers
for key in rh.headers.keys():
assert key.upper() in rh_headers
assert key.lower() in rh_headers
def test_common_attrs(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
for name in _common:
assert rh.headers[name] == name
attr = name.replace('x-ms-', '').replace('-', '_')
assert getattr(rh, attr) == name
def test_other_attrs(self):
rh = m.RecordDiagnostics()
rh(_headers, "body")
assert rh.headers['other'] == 'other'
with pytest.raises(AttributeError):
rh.other
|
mit
| 9,059,282,863,094,365,000
| 25.607843
| 62
| 0.572587
| false
| 3.63807
| true
| false
| false
|
Storj/pyp2p
|
tests/test_sock.py
|
1
|
15565
|
"""
* Test whether multiple recvs on the same connection (non-blocking) will
eventually have the connection closed (use another net instance.)
* Test whether multiple sends on the same connection (non-blocking) will
eventually lead to the connection being closed (use a net instance with
no recvs! and loop over the cons)
(Not implemented for now since these will greatly slow the build.)
"""
import hashlib
import os
import tempfile
from threading import Thread
from unittest import TestCase
from pyp2p.net import rendezvous_servers
from pyp2p.rendezvous_client import RendezvousClient
from pyp2p.sock import *
if sys.version_info >= (3, 0, 0):
from urllib.parse import urlparse
import socketserver as SocketServer
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
else:
from urlparse import urlparse
import SocketServer
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class ThreadingSimpleServer(
SocketServer.ThreadingMixIn,
HTTPServer
):
pass
def md5sum(fname):
my_hash = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
my_hash.update(chunk)
return my_hash.hexdigest()
class SockDownload:
def __init__(self, url, expected_hash, file_size, blocking=0,
encoding="ascii"):
"""
Download a file from a HTTP URL and compare it to an MD5 hash.
Uses the sock.py module for testing.
:param url: URL to download
:param expected_hash: MD5 hash of file (md5sum file from term)
:param file_size: size in bytes of the file to download
:param blocking: use blocking or non-blocking sockets
:return:
"""
url = urlparse(url)
location = url.netloc.split(":")
if len(location) == 1:
port = 80
host, = location
else:
host, port = location
con = Sock(host, port, blocking=blocking, debug=1)
req = self.build_request(host, url.path)
con.send(req, send_all=1)
buf = u""
eof = u"\r\n\r\n"
while buf != eof and con.connected:
ch = con.recv(1)
if len(ch):
buf += ch
eq = 0
for i in range(0, len(buf)):
if buf[i] != eof[eq]:
eq = 0
else:
eq += 1
# Reset buf.
if eq == len(eof):
break
fp, path = tempfile.mkstemp()
os.close(fp)
remaining = file_size
with open(path, "ab") as fp:
future = time.time() + 30 # Slow connections are slow.
while con.connected and remaining:
data = con.recv(remaining, encoding=encoding)
print(type(data))
if len(data):
remaining -= len(data)
fp.write(data)
time.sleep(0.0002)
# Fail safe:
if time.time() >= future:
break
found_hash = md5sum(path)
os.remove(path)
if expected_hash is not None:
assert(found_hash == expected_hash)
def build_request(self, host, resource):
req = "GET %s HTTP/1.1\r\n" % resource
req += "Host: %s\r\n\r\n" % host
return req
class SockUpload:
def __init__(self, upload_size, blocking=0):
host = u"185.86.149.128"
port = 80
resource = u"/upload_test.php"
content = self.build_content(upload_size)
con = Sock(host, port, blocking=blocking, debug=1)
req = self.build_request(host, resource, content)
con.send(req, send_all=1, timeout=6)
# Now do the actual upload.
remaining = upload_size
chunk_size = 4096
while con.connected and remaining:
sent = upload_size - remaining
msg = content[sent:sent + chunk_size]
sent = con.send(msg)
if sent:
remaining -= sent
# Get response.
con.set_blocking(1)
ret = con.recv(1024)
# Check response.
expected_hash = hashlib.sha256(content).hexdigest()
assert(expected_hash in ret)
def build_request(self, host, resource, content):
req = "POST %s HTTP/1.1\r\n" % resource
req += "Host: %s\r\n" % host
req += "User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) "
req += "Gecko/20100101 Firefox/42.0\r\n"
req += "Accept: text/html,"
req += "application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
req += "Accept-Language: en-US,en;q=0.5\r\n"
req += "Accept-Encoding: gzip, deflate\r\n"
req += "Connection: keep-alive\r\n"
req += "Content-Type: application/x-www-form-urlencoded\r\n"
req += "Content-Length: %d\r\n\r\n" % (len(content) + 5)
req += "test=" # Hence the extra + 5.
return req
def build_content(self, upload_size):
content = b"8" * upload_size
return content
def simple_server():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 9000))
s.listen(0)
(clientsocket, address) = s.accept()
time.sleep(2)
s.close()
class TestSock(TestCase):
def test_http_upload_post(self):
SockUpload(1000 * 100)
def test_http_download(self):
SockDownload(
"http://mirror.internode.on.net/pub/test/1meg.test",
"e6527b4d5db05226f40f9f2e7750abfb",
1000000
)
def test_blocking_mode(self):
x = Sock()
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
x = Sock(blocking=1)
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
x = Sock("www.example.com", 80, timeout=10)
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
x = Sock("www.example.com", 80, blocking=1, timeout=10)
blocking = x.s.gettimeout()
if x.blocking:
assert(blocking >= 1 or blocking is None)
else:
assert(blocking == 0.0)
x.close()
def test_blocking_timeout(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
t = time.time()
s.recv_line(timeout=1)
if time.time() - t >= 4:
print("Manual timeout failed.")
assert 0
s.close()
def test_non_blocking_timeout(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
assert(s.recv_line() == u"")
assert(s.recv(1) == u"")
s.close()
def test_encoding(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
s.send_line("SOURCE TCP 50")
ret = s.recv(1, encoding="ascii")
if sys.version_info >= (3, 0, 0):
assert(type(ret) == bytes)
else:
assert(type(ret) == str)
assert(ret == b"R")
ret = s.recv_line()
assert(u"EMOTE" in ret)
s.send_line("SOURCE TCP 50")
ret = s.recv(1, encoding="unicode")
if sys.version_info >= (3, 0, 0):
assert(type(ret) == str)
else:
assert(type(ret) == unicode)
s.close()
def test_0000001_sock(self):
client = RendezvousClient(nat_type="preserving",
rendezvous_servers=rendezvous_servers)
s = client.server_connect()
assert s.connected
s.send_line("SOURCE TCP 323")
assert s.connected
line = s.recv_line()
assert ("REMOTE" in line)
s = Sock("www.example.com", 80, blocking=0, timeout=10)
data = "GET / HTTP/1.1\r\n"
data += "Connection: close\r\n"
data += "Host: www.example.com\r\n\r\n"
s.send(data, send_all=1)
replies = ""
while s.connected:
for reply in s:
# Output should be unicode.
if sys.version_info >= (3, 0, 0):
assert (type(reply) == str)
else:
assert (type(reply) == unicode)
replies += reply
print(reply)
assert (s.connected != 1)
assert (replies != "")
s.close()
s.reconnect()
s.close()
s = Sock("www.example.com", 80, blocking=1, timeout=10)
s.send_line("GET / HTTP/1.1")
s.send_line("Host: www.example.com\r\n")
line = s.recv_line()
print(line)
print(type(line))
print(s.buf)
print(type(s.buf))
        assert (line == "HTTP/1.1 200 OK")
if sys.version_info >= (3, 0, 0):
assert (type(line) == str)
else:
assert (type(line) == unicode)
s.close()
s = Sock()
s.buf = b"\r\nx\r\n"
x = s.parse_buf()
assert (x[0] == "x")
s.buf = b"\r\n"
x = s.parse_buf()
assert (x == [])
s.buf = b"\r\n\r\n"
x = s.parse_buf()
assert (x == [])
s.buf = b"\r\r\n\r\n"
x = s.parse_buf()
assert (x[0] == "\r")
s.buf = b"\r\n\r\n\r\nx"
x = s.parse_buf()
assert (x == [])
s.buf = b"\r\n\r\nx\r\nsdfsdfsdf\r\n"
x = s.parse_buf()
assert (x[0] == "x" and x[1] == "sdfsdfsdf")
s.buf = b"sdfsdfsdf\r\n"
s.parse_buf()
s.buf += b"abc\r\n"
x = s.parse_buf()
assert (x[0] == "abc")
s.buf += b"\r\ns\r\n"
x = s.parse_buf()
assert (x[0] == "s")
s.buf = b"reply 1\r\nreply 2\r\n"
s.replies = []
s.update()
        assert (s.pop_reply() == "reply 1")
        assert (s.replies[0] == "reply 2")
def test_keep_alive(self):
old_system = platform.system
for os in ["Darwin", "Windows", "Linux"]:
def system_wrapper():
return os
platform.system = system_wrapper
sock = Sock()
# Sock option error - not supported on this OS.
try:
sock.set_keep_alive(sock.s)
except socket.error as e:
valid_errors = (10042, 22)
if e.errno not in valid_errors:
raise e
except AttributeError:
pass
sock.close()
platform.system = old_system
assert 1
def test_non_default_iface(self):
sock = Sock(interface="eth12")
try:
sock.connect("www.example.com", 80, timeout=10)
except (TypeError, socket.error) as e:
pass
sock.close()
assert 1
def test_ssl(self):
s = Sock(
"www.example.com",
443,
blocking=0,
timeout=10,
use_ssl=1
)
data = "GET / HTTP/1.1\r\n"
data += "Connection: close\r\n"
data += "Host: www.example.com\r\n\r\n"
s.send(data, send_all=1)
replies = ""
while s.connected:
for reply in s:
# Output should be unicode.
if sys.version_info >= (3, 0, 0):
assert (type(reply) == str)
else:
assert (type(reply) == unicode)
replies += reply
print(reply)
assert (s.connected != 1)
assert (replies != "")
def test_ssl_blocking_error(self):
# Blocking.
s = Sock(
"www.example.com",
443,
blocking=1,
timeout=2,
use_ssl=1,
debug=1
)
s.get_chunks()
s.close()
# Non-blocking.
s = Sock(
"www.example.com",
443,
blocking=0,
timeout=2,
use_ssl=1,
debug=1
)
s.get_chunks()
s.close()
def test_decoding_error(self):
SockDownload(
"http://mirror.internode.on.net/pub/test/1meg.test",
expected_hash=None,
file_size=1000,
blocking=0,
encoding="unicode"
)
def test_broken_send_con(self):
# Can't monkey patch socket on Linux.
        if platform.system() != "Windows":
return
port = 10121
server = ThreadingSimpleServer(('', port), SimpleHTTPRequestHandler)
sock = Sock("127.0.0.1", port, debug=1, timeout=6)
server.server_close()
print(sock.send(b"test"))
sock.close()
server = ThreadingSimpleServer(('', port), SimpleHTTPRequestHandler)
def close_server():
time.sleep(1)
server.server_close()
sock = Sock("127.0.0.1", port, debug=1, timeout=6)
Thread(target=close_server).start()
for i in range(0, 5):
print(sock.send(b"test"))
time.sleep(0.5)
        sock.close()
# Simulate send timeout!
sock = Sock(debug=1, blocking=1)
def raise_timeout():
time.sleep(1)
original_send = sock.s.send
def fake_send(data):
raise socket.timeout("timed out")
sock.s.send = fake_send
time.sleep(1)
sock.s.send = original_send
Thread(target=raise_timeout).start()
sock.connect("www.example.com", 80)
# You want to fill up the entire networking buffer
# so that it times out without the needed recv.
buf_size = sock.s.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) + 1
buf_size *= 2
sock.chunk_size = buf_size
total = 0
for i in range(0, 4):
x = sock.send(b"x" * buf_size)
total += x
if x < buf_size:
break
time.sleep(2.2)
sock.close()
# Test broken connection.
sock = Sock(debug=1, blocking=1)
def raise_timeout():
time.sleep(1)
original_send = sock.s.send
def fake_send(data):
return 0
sock.s.send = fake_send
time.sleep(1)
Thread(target=raise_timeout).start()
sock.connect("www.example.com", 80)
# You want to fill up the entire networking buffer
# so that it times out without the needed recv.
x = 1
timeout = time.time() + 10
while x and time.time() < timeout:
x = sock.send(b"x")
time.sleep(2.2)
sock.close()
def test_magic(self):
sock = Sock()
sock.replies = ["a", "b", "c"]
assert(len(sock) == 3)
assert(sock[0] == "a")
del sock[0]
assert(sock[0] == "b")
sock[0] = "x"
assert(sock[0] == "x")
y = list(reversed(sock))
assert(y == ["x", "c"])
|
mit
| -2,747,348,296,401,501,000
| 27.403285
| 78
| 0.511083
| false
| 3.649472
| true
| false
| false
|
julython/julython.org
|
july/people/migrations/0010_auto__add_userbadge.py
|
1
|
9221
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserBadge'
db.create_table(u'people_userbadge', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['july.User'])),
('badges', self.gf('jsonfield.fields.JSONField')(null=True, blank=True)),
))
db.send_create_signal(u'people', ['UserBadge'])
def backwards(self, orm):
# Deleting model 'UserBadge'
db.delete_table(u'people_userbadge')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'july.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'location_members'", 'null': 'True', 'to': u"orm['people.Location']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'picture_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['people.Project']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team_members'", 'null': 'True', 'to': u"orm['people.Team']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'people.commit': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'Commit'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'files': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Project']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['july.User']", 'null': 'True', 'blank': 'True'})
},
u'people.language': {
'Meta': {'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'people.location': {
'Meta': {'object_name': 'Location'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.project': {
'Meta': {'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'forked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parent_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'repo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.team': {
'Meta': {'object_name': 'Team'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'people.userbadge': {
'Meta': {'object_name': 'UserBadge'},
'badges': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['july.User']"})
}
}
complete_apps = ['people']
|
mit
| 1,895,763,320,148,108,300
| 71.614173
| 195
| 0.544193
| false
| 3.609002
| false
| false
| false
|
astromme/classify-handwritten-characters
|
utils/gnt.py
|
1
|
1587
|
#!/usr/bin/env python3
import os
import sys
import numpy as np
from .tagcode import tagcode_to_unicode
def samples_from_gnt(f):
header_size = 10
# read samples from f until no bytes remaining
while True:
header = np.fromfile(f, dtype='uint8', count=header_size)
if not header.size: break
sample_size = header[0] + (header[1]<<8) + (header[2]<<16) + (header[3]<<24)
tagcode = header[5] + (header[4]<<8)
width = header[6] + (header[7]<<8)
height = header[8] + (header[9]<<8)
assert header_size + width*height == sample_size
bitmap = np.fromfile(f, dtype='uint8', count=width*height).reshape((height, width))
yield bitmap, tagcode
def read_gnt_in_directory(gnt_dirpath):
for file_name in os.listdir(gnt_dirpath):
if file_name.endswith('.gnt'):
file_path = os.path.join(gnt_dirpath, file_name)
with open(file_path, 'rb') as f:
for bitmap, tagcode in samples_from_gnt(f):
yield bitmap, tagcode
def main():
import png
if len(sys.argv) != 3:
        print("usage: {} gntfile outputdir".format(sys.argv[0]))
        return
_, gntfile, outputdir = sys.argv
try:
os.makedirs(outputdir)
except FileExistsError:
pass
    with open(gntfile, 'rb') as f:
for i, (bitmap, tagcode) in enumerate(samples_from_gnt(f)):
character = tagcode_to_unicode(tagcode)
png.from_array(bitmap, 'L').save(os.path.join(outputdir, '{} {}.png'.format(character, i)))
if __name__ == "__main__":
main()
|
mit
| -8,479,756,461,216,267,000
| 28.388889
| 103
| 0.592313
| false
| 3.272165
| false
| false
| false
|
crawfordsm/pyspectrograph
|
PySpectrograph/WavelengthSolution/WavelengthSolution.py
|
1
|
3169
|
"""Wavelength Solution is a task describing the functional form for transforming
pixel position to wavelength. The inputs for this task are the given pixel position
and the corresponding wavelength. The user selects an input functional form and
order for that form. The task then calculates the coefficients for that form.
Possible options for the wavelength solution include polynomial, legendre, spline.
HISTORY
20090915 SMC Initially Written by SM Crawford
LIMITATIONS
20090915 SMC Need to add legendre, spline functions
"""
import numpy as np
from .LineSolution import LineSolution
from .ModelSolution import ModelSolution
class WavelengthSolution:
"""Wavelength Solution is a task describing the functional form for transforming
pixel position to wavelength.
"""
func_options = ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev', 'model']
def __init__(self, x, w, function='poly', order=3, niter=5, thresh=3,
sgraph=None, cfit='both', xlen=3162, yval=0):
self.sgraph = sgraph
self.function = function
self.order = order
self.niter = niter
self.thresh = thresh
self.cfit = cfit
self.xlen = xlen
self.yval = yval
self.set_array(x, w)
self.set_func()
def set_array(self, x, w):
self.x_arr = x
self.w_arr = w
def set_thresh(self, thresh):
self.thresh = thresh
def set_niter(self, niter):
self.niter = niter
def set_func(self):
if self.function in ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev']:
self.func = LineSolution(self.x_arr, self.w_arr, function=self.function,
order=self.order, niter=self.niter, thresh=self.thresh)
if self.function == 'model':
self.func = ModelSolution(self.x_arr, self.w_arr, sgraph=self.sgraph,
xlen=self.xlen, yval=self.yval, order=self.order)
def fit(self):
if self.function in ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev']:
self.func.interfit()
self.coef = self.func.coef
if self.function in ['model']:
self.func.fit(cfit=self.cfit)
self.coef = np.array([c() for c in self.func.coef])
# self.set_coef(coef)
def set_coef(self, coef):
if self.function in ['poly', 'polynomial', 'spline', 'legendre', 'chebyshev']:
self.func.coef = coef
self.coef = self.func.coef
if self.function in ['model']:
for i in range(len(self.func.coef)):
self.func.coef[i].set(coef[i])
self.coef = np.array([c() for c in self.func.coef])
def value(self, x):
return self.func.value(x)
def invvalue(self, w):
"""Given a wavelength, return the pixel position
"""
return w
def sigma(self, x, y):
"""Return the RMS of the fit """
return (((y - self.value(x)) ** 2).mean()) ** 0.5
def chisq(self, x, y, err):
"""Return the chi^2 of the fit"""
return (((y - self.value(x)) / err) ** 2).sum()
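# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It assumes LineSolution's polynomial fit behaves as the class docstring
# describes; the pixel/wavelength pairs below are invented for illustration.
if __name__ == '__main__':
    pix = np.array([10.0, 200.0, 750.0, 1500.0, 2900.0])
    wave = np.array([4000.0, 4380.0, 5480.0, 6980.0, 9780.0])
    ws = WavelengthSolution(pix, wave, function='poly', order=3)
    ws.fit()
    print('coefficients:', ws.coef)
    print('wavelength at pixel 1000:', ws.value(1000.0))
    print('rms of the fit:', ws.sigma(pix, wave))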
|
bsd-3-clause
| -8,209,459,238,024,676,000
| 32.712766
| 92
| 0.60082
| false
| 3.655133
| false
| false
| false
|
Guokr1991/ProstateSensitivityAnalysis
|
convert_histology_txt_json.py
|
1
|
2295
|
def main():
hist_txt_to_json()
def hist_txt_to_json():
j = open('HistologyLesions.json', 'w')
j.write('{\n')
index = True
benign = False
with open('HistologyLesions.txt', 'r') as t:
tfile = t.readlines()
num_lesions = len(tfile)
global td # define globally for ece_extent_writer method
for nl, td in enumerate(tfile):
td = td[:-1]
if 'pca' in td and index:
j.write('\t"pca": [\n')
j.write('\t\t{\n\t\t\t"region": "%s",\n' % td.split(',')[1][1:])
j.write('\t\t\t"volume_cc": %.1f,\n' % float(td.split(',')[2]))
j.write('\t\t\t"Gleason": %i,\n' % float(td.split(',')[3]))
j.write('\t\t\t"Staging": "%s",\n' % td.split(',')[4][1:4])
j.write('\t\t\t"ECE_extent": "%s",\n' % ece_extent_writer())
j.write('\t\t\t"index": true\n\t\t}')
index = False
if (nl+1) == num_lesions:
j.write(']\n')
elif 'pca' in td and not index:
j.write(',\n')
j.write('\t\t{\n\t\t\t"region": "%s",\n' % td.split(',')[1][1:])
j.write('\t\t\t"volume_cc": %.1f,\n' % float(td.split(',')[2]))
j.write('\t\t\t"Gleason": %i,\n' % float(td.split(',')[3]))
j.write('\t\t\t"Staging": "%s",\n' % td.split(',')[4][1:4])
j.write('\t\t\t"ECE_extent": "%s",\n' % ece_extent_writer())
j.write('\t\t\t"index": false\n\t\t}')
if (nl+1) == num_lesions:
j.write(']\n')
elif ('atrophy' in td) or ('bph' in td):
if not benign:
j.write('],\n')
else:
j.write(',\n')
num_regions = len(td.split(',')[1:])
j.write('\t"%s": {\n\t\t"regions": [' % td.split(',')[0])
for n, r in enumerate(td.split(',')[1:]):
if n < (num_regions-1):
j.write('"%s", ' % r[1:])
else:
j.write('"%s"]\n\t\t}' % r[1:])
benign = True
j.write('\n}')
def ece_extent_writer():
if td.split(',')[4][-1] == 'E':
return "Established"
elif td.split(',')[4][-1] == 'F':
return "Focal"
else:
return "None"
if __name__ == '__main__':
main()
|
apache-2.0
| -3,372,639,854,298,931,000
| 33.253731
| 76
| 0.423965
| false
| 2.87234
| false
| false
| false
|
harlowja/speedlimit
|
speedlimit/__init__.py
|
1
|
3653
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import time
from monotonic import monotonic as _now
class SpeedLimit(object):
"""Speed/limiting iterator wrapper object.
A wrapper object that uses the `token bucket`_ algorithm to limit the
    rate at which values come out of an iterable. This can be used to limit
the consumption speed of iteration of some other iterator (or iterable).
.. _token bucket: http://en.wikipedia.org/wiki/Token_bucket
"""
def __init__(self,
# How many items to yield from the provided
# wrapped iterator (per second).
items_per_second,
# Used to simulate a thread with its own 'tic rate'. Making
# this smaller affects the accuracy of the 'tic' calculation,
# which affects the accuracy of consumption (and delays).
refresh_rate_seconds=0.01,
# How *full* the initial bucket is.
initial_bucket_size=1,
# Made a keyword argument, so one could replace this
# with a eventlet.sleep or other idling function...
sleep_func=time.sleep):
self._refresh_rate_seconds = refresh_rate_seconds
self._bucket = (items_per_second *
refresh_rate_seconds * initial_bucket_size)
self._items_per_tic = items_per_second * refresh_rate_seconds
self._next_fill = _now() + refresh_rate_seconds
self._sleep = sleep_func
def _check_fill(self):
# Fill the bucket based on elapsed time.
#
# This simulates a background thread...
now = _now()
if now > self._next_fill:
d = now - self._next_fill
tics = int(math.ceil(d / self._refresh_rate_seconds))
self._bucket += tics * self._items_per_tic
self._next_fill += tics * self._refresh_rate_seconds
def speed_limit_iter(self, itr, chunk_size_cb=None):
"""Return an iterator/generator which limits after each iteration.
:param itr: an iterator to wrap
:param chunk_size_cb: a function that can calculate the
size of each chunk (if none provided this
defaults to 1)
"""
for chunk in itr:
if chunk_size_cb is None:
sz = 1
else:
sz = chunk_size_cb(chunk)
self._check_fill()
if sz > self._bucket:
now = _now()
tics = int((sz - self._bucket) / self._items_per_tic)
tm_diff = self._next_fill - now
secs = tics * self._refresh_rate_seconds
if tm_diff > 0:
secs += tm_diff
self._sleep(secs)
self._check_fill()
self._bucket -= sz
yield chunk
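# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It wraps a plain range() with the token-bucket limiter described in the class
# docstring; the rate and item count below are arbitrary.
if __name__ == '__main__':
    limiter = SpeedLimit(items_per_second=100)
    start = _now()
    consumed = sum(1 for _ in limiter.speed_limit_iter(range(250)))
    print("consumed %d items in %.2f seconds" % (consumed, _now() - start))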
|
apache-2.0
| 4,321,641,280,246,281,700
| 40.044944
| 78
| 0.588284
| false
| 4.34881
| false
| false
| false
|
dubourg/openturns
|
python/test/t_FisherSnedecor_std.py
|
1
|
4992
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
from cmath import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
    # Instantiate one distribution object
distribution = FisherSnedecor(5.5, 10.5)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
# Is this distribution elliptical ?
print("Elliptical = ", distribution.isElliptical())
# Is this distribution continuous ?
print("Continuous = ", distribution.isContinuous())
# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))
# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", repr(
oneSample[0]), " last=", repr(oneSample[size - 1]))
print("mean=", repr(oneSample.computeMean()))
print("covariance=", repr(oneSample.computeCovariance()))
size = 100
for i in range(2):
msg = ''
if FittingTest.Kolmogorov(distribution.getSample(size), distribution).getBinaryQualityMeasure():
msg = "accepted"
else:
msg = "rejected"
print(
"Kolmogorov test for the generator, sample size=", size, " is", msg)
size *= 10
# Define a point
point = NumericalPoint(distribution.getDimension(), 1.0)
print("Point= ", repr(point))
# Show PDF and CDF of point
eps = 1e-5
    # derivative of the PDF with regard to its arguments
DDF = distribution.computeDDF(point)
# print "ddf =" , repr(DDF)
# by the finite difference technique
# print "ddf (FD)=" ,repr(NumericalPoint(1, (distribution.computePDF(
# point + NumericalPoint(1, eps) ) - distribution.computePDF( point +
# NumericalPoint(1, -eps) )) / (2.0 * eps)))
# PDF value
LPDF = distribution.computeLogPDF(point)
print("log pdf=%.6f" % LPDF)
PDF = distribution.computePDF(point)
print("pdf =%.6f" % PDF)
# by the finite difference technique from CDF
print("pdf (FD)=%.6f" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) -
distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps)))
    # derivative of the PDF with regard to the parameters of the distribution
CDF = distribution.computeCDF(point)
print("cdf=%.6f" % CDF)
CCDF = distribution.computeComplementaryCDF(point)
print("ccdf=%.6f" % CCDF)
CF = distribution.computeCharacteristicFunction(point[0])
print("characteristic function=(%.6f+%.6fj)" % (CF.real, CF.imag))
## PDFgr = distribution.computePDFGradient( point )
# print "pdf gradient =" , repr(PDFgr)
# by the finite difference technique
## PDFgrFD = NumericalPoint(2)
## PDFgrFD[0] = (FisherSnedecor(distribution.getLambda() + eps, distribution.getGamma()).computePDF(point) - FisherSnedecor(distribution.getLambda() - eps, distribution.getGamma()).computePDF(point)) / (2.0 * eps)
## PDFgrFD[1] = (FisherSnedecor(distribution.getLambda(), distribution.getGamma() + eps).computePDF(point) - FisherSnedecor(distribution.getLambda(), distribution.getGamma() - eps).computePDF(point)) / (2.0 * eps)
# print "pdf gradient (FD)=" , repr(PDFgrFD)
    # derivative of the PDF with regard to the parameters of the distribution
## CDFgr = distribution.computeCDFGradient( point )
# print "cdf gradient =" , repr(CDFgr)
## CDFgrFD = NumericalPoint(2)
## CDFgrFD[0] = (FisherSnedecor(distribution.getLambda() + eps, distribution.getGamma()).computeCDF(point) - FisherSnedecor(distribution.getLambda() - eps, distribution.getGamma()).computeCDF(point)) / (2.0 * eps)
## CDFgrFD[1] = (FisherSnedecor(distribution.getLambda(), distribution.getGamma() + eps).computeCDF(point) - FisherSnedecor(distribution.getLambda(), distribution.getGamma() - eps).computeCDF(point)) / (2.0 * eps)
# print "cdf gradient (FD)=", repr(CDFgrFD)
# quantile
quantile = distribution.computeQuantile(0.95)
print("quantile=", repr(quantile))
print("cdf(quantile)=%.6f" % distribution.computeCDF(quantile))
mean = distribution.getMean()
print("mean=", repr(mean))
standardDeviation = distribution.getStandardDeviation()
print("standard deviation=", repr(standardDeviation))
skewness = distribution.getSkewness()
print("skewness=", repr(skewness))
kurtosis = distribution.getKurtosis()
print("kurtosis=", repr(kurtosis))
covariance = distribution.getCovariance()
print("covariance=", repr(covariance))
parameters = distribution.getParametersCollection()
print("parameters=", repr(parameters))
for i in range(6):
print("standard moment n=", i, " value=",
distribution.getStandardMoment(i))
print("Standard representative=", distribution.getStandardRepresentative())
except:
import sys
print("t_FisherSnedecor_std.py", sys.exc_info()[0], sys.exc_info()[1])
|
gpl-3.0
| -8,776,602,948,175,847,000
| 42.408696
| 217
| 0.672276
| false
| 3.635834
| false
| false
| false
|
Fuchai/Philosophy-Machine
|
knowledgeframework/playground2.py
|
1
|
3640
|
from knowledgeframework.kf2 import *
kf=KF()
pred1 = Predicate(id="male")
pred2 = Predicate(id="single")
pred4 = Predicate(id="bachelor")
pred5 = Predicate(id="thief")
pred6 = Predicate(id="steals")
pred7 = Predicate(id="female")
pred8 = Predicate(id="human")
kf.add_node(pred1, name="male")
kf.add_node(pred2, name="man")
kf.add_node(pred4, name="bachelor")
pred3 = Analytical(kf, lambda x, y: x and y, (pred1, pred2), id="AND")
kf.add_node(pred3)
kf.add_node(pred6, name="thief")
kf.add_node(pred5, name="steals")
kf.add_edge(pred5, pred6)
kf.add_node(pred7)
kf.add_node(pred8)
kf.make_dimension(pred8) # pred8 human is a dimension
kf.add_edge_to_instance(pred1) # known man
kf.add_edge_to_instance(pred2) # known male
print(kf.consistency_search_through_dimensions(kf.get_instance()))
pred9=Predicate(id="dummy")
kf.add_node(pred9)
kf.add_edge_to_instance(pred7)
print(kf.consistency_search_through_dimensions(kf.get_instance()))
print(kf.static_proof("bachelor"))
print(kf.dynamic_eval("bachelor"))
# pred1=Predicate(id="1")
# pred2=Predicate(id="2")
# pred4=Predicate(id="4")
# pred5=Predicate(id="5")
# pred6=Predicate(id="6")
# pred7=Predicate(id="7")
#
#
#
# knowledgeframework.add_node(pred1,name="male")
# knowledgeframework.add_node(pred2,name="man")
# pred3=Analytical(knowledgeframework,lambda x,y: x and y,(pred1,pred2),id="3")
#
# knowledgeframework.add_node(pred3) # bachelor analytical
# knowledgeframework.add_node(pred4,name="bachelor")
# knowledgeframework.add_edge(pred3,pred4)
#
# knowledgeframework.add_node(pred6,name="thief")
# knowledgeframework.add_node(pred5,name="steals")
# knowledgeframework.add_edge(pred5,pred6)
# knowledgeframework.add_node(pred7)
#
# knowledgeframework.add_edge_to_instance(pred1) # known man
# knowledgeframework.add_edge_to_instance(pred2) # known male
#
# # if pred3.eval()==True:
# # knowledgeframework.add_edge_to_instance(pred3)
#
# print(knowledgeframework.static_proof(pred4))
# print(knowledgeframework.hybrid_eval_I(pred4))
# True!
# knowledgeframework=KF()
# pred1=Predicate()
# pred2=Predicate()
#
# knowledgeframework.add_node(pred1,name="red")
# knowledgeframework.add_node(pred2,name="apple")
# knowledgeframework.add_edge(pred2,pred1)
#
# print(1,knowledgeframework.nodes())
# print(knowledgeframework.edges())
# print(2,knowledgeframework.find_name("apple"))
# print(3,knowledgeframework.find_name("red"))
# print(4,knowledgeframework.predecessors_set(knowledgeframework.find_name("apple")))
# #print(knowledgeframework.predecessors(knowledgeframework[knowledgeframework.find_name("apple")[0]]))
#
#
# # Noteworthy:"apple"'s successor is the "name" labels, and the predecessors return all the names. This is an interesting
# # operation, since it basically returns everything that has some directly overlapped properties.
# print(knowledgeframework.predecessors_set(knowledgeframework.successors_set(["apple"])))
#
# dummy1=knowledgeframework.nodes()[3]
# dummy2=knowledgeframework.nodes()[4]
#
# # Example of a proof that an apple is red
# print(5,knowledgeframework.get_node_from_hash("instance") in knowledgeframework.nodes())
# knowledgeframework.add_edge(knowledgeframework.get_node_from_hash("instance"),knowledgeframework.find_name("apple")[0])
# print(networkx.shortest_path(knowledgeframework,knowledgeframework.get_instance(),knowledgeframework.find_name("red")[0]))
#
# # Fast query
# knowledgeframework.static_proof(knowledgeframework.find_name_unqiue("red"))
#
#
# def hello(*args):
# for i in args:
# print(i)
#
# print (type(args))
#
#
# def ya(i, *args):
# print("starting")
# hello(*args)
#
#
#
# ya(2, 3, 4, 5)
|
apache-2.0
| 8,127,122,852,842,790,000
| 29.341667
| 124
| 0.736538
| false
| 2.821705
| false
| false
| false
|
tensorflow/privacy
|
tensorflow_privacy/privacy/estimators/multi_label_head.py
|
1
|
6457
|
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiclass head for Estimator that allow integration with TF Privacy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.keras.utils import losses_utils # pylint: disable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator import model_fn
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export_output
from tensorflow_estimator.python.estimator.head import base_head
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
class DPMultiLabelHead(tf.estimator.MultiLabelHead):
"""Creates a TF Privacy-enabled version of MultiLabelHead."""
def __init__(self,
n_classes,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
classes_for_class_based_metrics=None,
name=None):
if loss_reduction == tf.keras.losses.Reduction.NONE:
loss_reduction = tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
super(DPMultiLabelHead, self).__init__(
n_classes=n_classes,
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=loss_fn,
classes_for_class_based_metrics=classes_for_class_based_metrics,
name=name)
def loss(self,
labels,
logits,
features=None,
mode=None,
regularization_losses=None):
"""Returns regularized training loss. See `base_head.Head` for details."""
del mode # Unused for this head.
with tf.compat.v1.name_scope(
'losses', values=(logits, labels, regularization_losses, features)):
logits = base_head.check_logits_final_dim(logits, self.logits_dimension)
labels = self._processed_labels(logits, labels)
unweighted_loss, weights = self._unweighted_loss_and_weights(
logits, labels, features)
vector_training_loss = losses_utils.compute_weighted_loss(
unweighted_loss,
sample_weight=weights,
reduction=tf.keras.losses.Reduction.NONE)
regularization_loss = tf.math.add_n(
regularization_losses) if regularization_losses is not None else None
vector_regularized_training_loss = (
tf.add(vector_training_loss, regularization_loss)
if regularization_loss is not None else vector_training_loss)
return vector_regularized_training_loss
def _create_tpu_estimator_spec(self,
features,
mode,
logits,
labels=None,
optimizer=None,
trainable_variables=None,
train_op_fn=None,
update_ops=None,
regularization_losses=None):
"""See superclass for description."""
with tf.compat.v1.name_scope(self._name, 'head'):
# Predict.
pred_keys = prediction_keys.PredictionKeys
predictions = self.predictions(logits)
if mode == ModeKeys.PREDICT:
probabilities = predictions[pred_keys.PROBABILITIES]
classifier_output = base_head.classification_output(
scores=probabilities,
n_classes=self._n_classes,
label_vocabulary=self._label_vocabulary)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
base_head.DEFAULT_SERVING_KEY:
classifier_output,
base_head.CLASSIFY_SERVING_KEY:
classifier_output,
base_head.PREDICT_SERVING_KEY:
export_output.PredictOutput(predictions)
})
regularized_training_loss = self.loss(
logits=logits,
labels=labels,
features=features,
mode=mode,
regularization_losses=regularization_losses)
scalar_loss = tf.reduce_mean(regularized_training_loss)
# Eval.
if mode == ModeKeys.EVAL:
eval_metrics = self.metrics(regularization_losses=regularization_losses)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.EVAL,
predictions=predictions,
loss=scalar_loss,
eval_metrics=base_head.create_eval_metrics_tuple(
self.update_metrics, {
'eval_metrics': eval_metrics,
'features': features,
'logits': logits,
'labels': labels,
'regularization_losses': regularization_losses
}))
# Train.
train_op = base_head.create_estimator_spec_train_op(
head_name=self._name,
optimizer=optimizer,
train_op_fn=train_op_fn,
update_ops=update_ops,
trainable_variables=trainable_variables,
regularized_training_loss=regularized_training_loss,
loss_reduction=self._loss_reduction)
# Create summary.
base_head.create_estimator_spec_summary(
regularized_training_loss=scalar_loss,
regularization_losses=regularization_losses,
summary_key_fn=self._summary_key)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.TRAIN,
predictions=predictions,
loss=scalar_loss,
train_op=train_op)
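# --- Editor's note: a hedged usage sketch, not part of the original module. ---
# DPMultiLabelHead is meant to drop in wherever an Estimator head is expected,
# typically together with a differentially private optimizer from this library.
# The feature columns, input_fn and optimizer names below are placeholders, not
# names taken from this file:
#
#   head = DPMultiLabelHead(n_classes=5)
#   estimator = tf.estimator.DNNEstimator(
#       head=head,
#       hidden_units=[64, 32],
#       feature_columns=my_feature_columns,   # hypothetical
#       optimizer=my_dp_optimizer)            # hypothetical DP-SGD optimizer
#   estimator.train(input_fn=my_input_fn)     # hypothetical input pipeline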
|
apache-2.0
| 1,296,019,722,160,518,000
| 41.480263
| 100
| 0.628775
| false
| 4.386549
| false
| false
| false
|
DentonJC/virtual_screening
|
moloi/descriptors/morgan_descriptor.py
|
1
|
1427
|
#!/usr/bin/env python
"""
https://github.com/kudkudak
"""
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
def smiles_to_morgan(smi, hashed=True, radius=2, n_bits=300):
mol = Chem.MolFromSmiles(smi)
if hashed:
try:
vect = AllChem.GetHashedMorganFingerprint(mol=mol,
radius=radius,
nBits=n_bits)
vect = vect.GetNonzeroElements()
vect_keys = list(vect.keys())
vect_values = list(vect.values())
# Not sure how to transform it better
vect_dense = np.zeros(shape=(n_bits,))
vect_dense[vect_keys] = vect_values
return vect_dense
except:
            print("Failed computing morgan fingerprint for %s" % smi)
return np.zeros(shape=(n_bits,))
else:
try:
mol = Chem.MolFromSmiles(smi)
vect = AllChem.GetMorganFingerprintAsBitVect(mol=mol,
radius=radius,
nBits=n_bits)
return np.array(vect)
except:
            print("Failed computing morgan fingerprint for %s" % smi)
return np.zeros(shape=(n_bits,))
if __name__ == "__main__":
features = smiles_to_morgan("CC")
print(features)
|
gpl-3.0
| 3,558,702,890,475,788,000
| 31.431818
| 71
| 0.501752
| false
| 3.941989
| false
| false
| false
|
maym2104/ift6266-h17-project
|
lib/updates.py
|
1
|
9422
|
"""
Copyright (c) 2017 - Philip Paquette
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Modified from https://raw.githubusercontent.com/Newmu/dcgan_code/master/lib/updates.py
# MIT License
import theano
import theano.tensor as T
from .utils import floatX
from .layers import l2norm
# ------------------------
# Regularization
# ------------------------
def clip_norm(grad, clip, norm):
if clip > 0:
grad = T.switch(T.ge(norm, clip), grad * clip / norm, grad)
return grad
def clip_norms(grads, clip):
norm = T.sqrt(sum([T.sum(grad ** 2) for grad in grads]))
return [clip_norm(grad, clip, norm) for grad in grads]
# Base regularizer
class Regularizer(object):
def __init__(self, l1=0., l2=0., maxnorm=0., l2norm=False, frobnorm=False):
self.__dict__.update(locals())
def max_norm(self, param, maxnorm):
if maxnorm > 0:
norms = T.sqrt(T.sum(T.sqr(param), axis=0))
desired = T.clip(norms, 0, maxnorm)
param = param * (desired / (1e-7 + norms))
return param
def l2_norm(self, param):
return param / l2norm(param, axis=0)
def frob_norm(self, param, nrows):
return (param / T.sqrt(T.sum(T.sqr(param)))) * T.sqrt(nrows)
def gradient_regularize(self, param, grad):
grad += param * self.l2
grad += T.sgn(param) * self.l1
return grad
def weight_regularize(self, param):
param = self.max_norm(param, self.maxnorm)
if self.l2norm:
param = self.l2_norm(param)
if self.frobnorm > 0:
param = self.frob_norm(param, self.frobnorm)
return param
# ------------------------
# Updates
# ------------------------
class Update(object):
def __init__(self, regularizer=Regularizer(), clipnorm=0.):
self.__dict__.update(locals())
def __call__(self, params, grads):
raise NotImplementedError
# Stochastic Gradient Descent
class SGD(Update):
def __init__(self, lr=0.01, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
updated_param = param - self.lr * grad
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# SGD with momentum
class Momentum(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = (self.momentum * m) - (self.lr * grad)
updates.append((m, v))
updated_param = param + v
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# SGD with Nesterov Accelerated Gradient
class Nesterov(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = (self.momentum * m) - (self.lr * grad)
updated_param = param + self.momentum * v - self.lr * grad
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((m, v))
updates.append((param, updated_param))
return updates
# RMS Prop
class RMSprop(Update):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
updates.append((acc, acc_new))
updated_param = param - self.lr * (grad / T.sqrt(acc_new + self.epsilon))
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# Adam
class Adam(Update):
def __init__(self, lr=0.001, b1=0.9, b2=0.999, e=1e-8, l=1 - 1e-8, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
t = theano.shared(floatX(1.))
b1_t = self.b1 * self.l ** (t - 1)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = theano.shared(param.get_value() * 0.)
m_t = b1_t * m + (1 - b1_t) * grad
v_t = self.b2 * v + (1 - self.b2) * grad ** 2
m_c = m_t / (1 - self.b1 ** t)
v_c = v_t / (1 - self.b2 ** t)
p_t = param - (self.lr * m_c) / (T.sqrt(v_c) + self.e)
p_t = self.regularizer.weight_regularize(p_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((param, p_t))
updates.append((t, t + 1.))
return updates
# AdaGrad
class Adagrad(Update):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_t = acc + grad ** 2
updates.append((acc, acc_t))
p_t = param - (self.lr / T.sqrt(acc_t + self.epsilon)) * grad
p_t = self.regularizer.weight_regularize(p_t)
updates.append((param, p_t))
return updates
# AdeDelta
class Adadelta(Update):
def __init__(self, lr=0.5, rho=0.95, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_delta = theano.shared(param.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
updates.append((acc, acc_new))
update = grad * T.sqrt(acc_delta + self.epsilon) / T.sqrt(acc_new + self.epsilon)
updated_param = param - self.lr * update
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
acc_delta_new = self.rho * acc_delta + (1 - self.rho) * update ** 2
updates.append((acc_delta, acc_delta_new))
return updates
# No updates
class NoUpdate(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
for param in params:
updates.append((param, param))
return updates
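# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It fits a shared vector to a fixed target with the Adam rule defined above;
# the target values, step count and learning rate are arbitrary.
if __name__ == '__main__':
    import numpy as np
    w = theano.shared(floatX(np.zeros(3)), name='w')
    target = floatX(np.array([1.0, -2.0, 3.0]))
    cost = T.sum((w - target) ** 2)
    train = theano.function([], cost, updates=Adam(lr=0.1)(params=[w], cost=cost))
    for _ in range(200):
        train()
    print(w.get_value())  # should end up close to [1, -2, 3]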
|
mit
| 2,983,714,831,106,733,600
| 36.537849
| 93
| 0.585863
| false
| 3.546105
| false
| false
| false
|
Stunkymonkey/passworts
|
calc.py
|
1
|
2206
|
#!/usr/bin/env python3
import os.path
from collections import defaultdict
import pickle
from optparse import OptionParser
import sys
n = 3
def analyse(counts, text, n):
"""analyse text with n chars markov state, update the counts"""
text = '^' * n + text + '$' * n
for i in range(len(text) - n):
st = i, text[i:i + n]
next = text[i + n]
counts[st][next] += 1
return counts
def compute_prob(counts):
"""compute ranges in [0 .. 1) of the given words"""
for c1 in counts:
total = float(sum(counts[c1][c2] for c2 in counts[c1]))
base = 0.0
for c2 in counts[c1]:
prob = counts[c1][c2] / total
base = base + prob
counts[c1][c2] = base
return counts
def text_import(dict_path, source):
"""reads a file to analyse"""
try:
with open(dict_path + source, "r", encoding="ISO-8859-1") as f:
text = set(f.read().split())
except FileNotFoundError as e:
raise SystemExit("Could not open text file: " + str(e))
return text
def dd():
return defaultdict(int)
def calculate(source):
print("reading...")
dict_path = os.path.join(os.path.abspath(".") + r"/dict/")
text = text_import(dict_path, source)
source = source.split(".")[0]
print("analysing text...")
counts = defaultdict(dd)
for word in text:
counts = analyse(counts, word, n)
print("calculating...")
counts = compute_prob(counts)
# print(type(counts))
# print(counts)
# save to file
print("write...")
with open((dict_path + source + '.pickle'), 'wb') as handle:
pickle.dump(counts, handle)
print("checking file...")
with open((dict_path + source + '.pickle'), 'rb') as handle:
written = pickle.load(handle)
if written == counts:
        print("Calculation was successful")
else:
print("Something went wrong")
sys.exit(1)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-f", "--file", type="string", dest="filename",
help="Name of the input file")
(options, args) = parser.parse_args()
calculate(options.filename)
|
mit
| 6,256,238,498,084,905,000
| 23.511111
| 71
| 0.577063
| false
| 3.616393
| false
| false
| false
|
wolfsonliu/crispr
|
pycas/script/pycasanalysis.py
|
1
|
3253
|
#! /bin/env python3
__all__ = ['pycaspattern']
# ------------------
# Libraries
# ------------------
import argparse
import os
import sys
sys.path.append('/gpfs/user/liuzh/Code/crispr')
import pandas as pd
from pycas.analysis import Screening
from pycas.utils.decorator import helpstring
from pycas.utils.decorator import AppendHelp
# ------------------
# Functions
# ------------------
def pycasanalysis(filename,
ctrl_label,
exp_label,
method,
hasbarcode,
out_dir):
file_type = 'csv'
if filename.split('.')[-1] == 'txt':
sep = '\t'
file_type = 'txt'
elif filename.split('.')[-1] == 'csv':
sep = ','
file_type = 'csv'
else:
raise ValueError('Input data file should be txt or csv')
if file_type == 'csv':
data = pd.read_csv(filename, header=0)
else:
data = pd.read_table(filename, header=0, sep='\t')
for x in ['gene', 'guide', 'barcode']:
if x not in data.columns:
            raise ValueError('Input data file should contain a column named: ' + x)
if len(ctrl_label) != len(exp_label):
raise ValueError('Input control labels and treatment labels should be of the same length.')
if out_dir != '' and not os.path.exists(out_dir):
os.mkdir(out_dir)
if method not in ['sunbird', 'mw']:
raise ValueError('The test method should be in: sunbird mw.')
analysis = Screening(data, ctrl_label, exp_label, hasbarcode=hasbarcode)
if method == 'sunbird':
analysis.sunbird(10)
analysis.test['sunbird'].to_csv(
os.path.join(out_dir, 'pycas_analysis_sunbird.csv')
)
else:
pass
# ------------------
# Main
# ------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='pycasanalysis',
        description='Analyse the library screening result.'
)
parser.add_argument(
'-i', '--input', required=True,
help='Input data file path, with columns: gene, guide <, barcode>, [labels]. (column names should be in the csv file).'
)
parser.add_argument(
'-c', '--control-label', nargs='+',
        help='Control experiment labels, separated by space.'
)
parser.add_argument(
'-t', '--treat-label', nargs='+',
        help='Treatment experiment labels, separated by space.'
)
parser.add_argument(
'-m', '--method', default='sunbird',
help='Method to be used in the analysis: sunbird, mw.'
)
parser.add_argument(
'--has-barcode', action='store_true',
help='Input data should be tested consider barcodes.'
)
parser.add_argument(
'--out-dir', default='',
help='Result output directory,default is current work directory.'
)
args = parser.parse_args()
def analysis(**args):
pycasanalysis(
filename=args['input'],
ctrl_label=args['control_label'],
exp_label=args['treat_label'],
method=args['method'],
hasbarcode=args['has_barcode'],
out_dir=args['out_dir']
)
analysis(**vars(args))
# ------------------
# EOF
# ------------------
|
gpl-3.0
| 329,395,760,262,836,000
| 29.383178
| 127
| 0.550907
| false
| 3.907452
| false
| false
| false
|
alberthdev/cihelper
|
cihelper/download.py
|
1
|
2101
|
#!/usr/bin/env python3
import os
import requests
import time
from collections import namedtuple
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
class TimeoutRequestsSession(requests.Session):
def __init__(self, *args, **kwargs):
self.__default_timeout = None
if 'timeout' in kwargs:
self.__default_timeout = kwargs.pop('timeout')
super().__init__(*args, **kwargs)
def request(self, *args, **kwargs):
if self.__default_timeout:
kwargs.setdefault('timeout', self.__default_timeout)
return super(TimeoutRequestsSession, self).request(*args, **kwargs)
SessionSettings = namedtuple("SessionSettings",
["total_retries", "timeout", "backoff_factor", "status_forcelist"])
cached_sessions = {}
def get_session(total_retries=5, timeout=60, backoff_factor=1, status_forcelist=None):
if not status_forcelist:
status_forcelist = (500, 502, 503, 504)
settings = SessionSettings(total_retries=total_retries, timeout=timeout,
backoff_factor=backoff_factor, status_forcelist=status_forcelist)
if settings in cached_sessions:
return cached_sessions[settings]
session = TimeoutRequestsSession(timeout=timeout)
retries = Retry(total=total_retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
session.mount("http://", HTTPAdapter(max_retries=retries))
    session.mount("https://", HTTPAdapter(max_retries=retries))
cached_sessions[settings] = session
return session
def check_url(session, url):
response = session.get(url)
return response.status_code < 400
def download_file(session, url, dest=None, chunk_size=8192):
dl_attempts = 0
dest = dest or os.path.basename(url)
with session.get(url, stream=True) as response:
response.raise_for_status()
with open(dest, 'wb') as fh:
            for chunk in response.iter_content(chunk_size=chunk_size):
fh.write(chunk)
return dest
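# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It grabs a cached, retry-enabled session, checks a URL and downloads it into
# the working directory; the URL and destination name below are just examples.
if __name__ == "__main__":
    session = get_session(total_retries=3, timeout=30)
    url = "https://example.com/"
    if check_url(session, url):
        print("saved to", download_file(session, url, dest="example.html"))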
|
mit
| -3,698,014,187,821,694,000
| 30.358209
| 96
| 0.656354
| false
| 4.03263
| false
| false
| false
|
kennt/fixtest
|
simple/logon_controller.py
|
1
|
4782
|
""" Simple client/server controller for testing.
Copyright (c) 2014 Kenn Takara
See LICENSE for details
"""
import logging
from fixtest.base.asserts import *
from fixtest.base.controller import TestCaseController
from fixtest.fix.constants import FIX
from fixtest.fix.messages import logon_message, logout_message
from fixtest.fix.transport import FIXTransportFactory
class LogonController(TestCaseController):
""" The base class for FIX-based TestCaseControllers.
This creates a client and a server that will
communicate with each other. So they will use
the same link config.
"""
def __init__(self, **kwargs):
super(LogonController, self).__init__(**kwargs)
self.testcase_id = 'Simple-1'
self.description = 'Test of the command-line tool'
config = kwargs['config']
self.server_config = config.get_role('test-server')
self.server_config.update({'name': 'server-9940'})
self.server_link_config = config.get_link('client', 'test-server')
self.server_link_config.update({
'sender_compid': self.server_link_config['test-server'],
'target_compid': self.server_link_config['client'],
})
self.client_config = config.get_role('client')
self.client_config.update({'name': 'client-9940'})
self.client_link_config = config.get_link('client', 'test-server')
self.client_link_config.update({
'sender_compid': self.client_link_config['client'],
'target_compid': self.client_link_config['test-server'],
})
self._servers = dict()
self._clients = dict()
factory = FIXTransportFactory('server-9940',
self.server_config,
self.server_link_config)
factory.filter_heartbeat = False
server = {
'name': 'server-9940',
'port': self.server_link_config['port'],
'factory': factory,
}
self._servers[server['name']] = server
# In the client case we do not need to provide a
        # factory, just a transport.
client = {
'name': 'client-9940',
'host': self.client_link_config['host'],
'port': self.client_link_config['port'],
'node': factory.create_transport('client-9940',
self.client_config,
self.client_link_config),
}
self._clients[client['name']] = client
self._logger = logging.getLogger(__name__)
def clients(self):
""" The clients that need to be started """
return self._clients
def servers(self):
""" The servers that need to be started """
return self._servers
def setup(self):
""" For this case, wait until our servers are all
connected before continuing with the test.
"""
# at this point the servers should be waiting
# so startup the clients
self.wait_for_client_connections(10)
self.wait_for_server_connections(10)
def teardown(self):
pass
def run(self):
""" This test is a demonstration of logon and
heartbeat/TestRequest processing. Usually
the logon process should be done from setup().
"""
client = self._clients['client-9940']['node']
client.protocol.heartbeat = 5
# We only have a single server connection
server = self._servers['server-9940']['factory'].servers[0]
server.protocol.heartbeat = 5
# client -> server
client.send_message(logon_message(client))
# server <- client
message = server.wait_for_message(title='waiting for logon')
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGON)])
# server -> client
server.send_message(logon_message(server))
server.start_heartbeat(True)
# client <- server
message = client.wait_for_message(title='waiting for logon ack')
client.start_heartbeat(True)
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGON)])
# Logout
client.send_message(logout_message(client))
message = server.wait_for_message(title='waiting for logout')
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGOUT)])
server.send_message(logout_message(server))
server.start_heartbeat(False)
message = client.wait_for_message('waiting for logout ack')
client.start_heartbeat(False)
assert_is_not_none(message)
assert_tag(message, [(35, FIX.LOGOUT)])
|
mit
| -4,686,859,582,700,257,000
| 32.914894
| 74
| 0.592221
| false
| 4.216931
| true
| false
| false
|
gwob/Maarifa
|
twpm/manage.py
|
1
|
5250
|
from csv import DictReader
from datetime import datetime
from pprint import pprint
from flask.ext.script import Manager
from taarifa_api import add_document, delete_documents, get_schema
from taarifa_waterpoints import app
from taarifa_waterpoints.schemas import facility_schema, service_schema
manager = Manager(app)
def check(response, success=201, print_status=True):
data, _, _, status = response
if status == success:
if print_status:
print " Succeeded"
return True
print "Failed with status", status
pprint(data)
return False
@manager.option("resource", help="Resource to show the schema for")
def show_schema(resource):
"""Show the schema for a given resource."""
pprint(get_schema(resource))
@manager.command
def list_routes():
"""List all routes defined for the application."""
import urllib
for rule in sorted(app.url_map.iter_rules(), key=lambda r: r.endpoint):
methods = ','.join(rule.methods)
print urllib.unquote("{:40s} {:40s} {}".format(rule.endpoint, methods,
rule))
@manager.command
def create_facility():
"""Create facility for waterpoints."""
check(add_document('facilities', facility_schema))
@manager.command
def create_service():
"""Create service for waterpoints."""
check(add_document('services', service_schema))
@manager.command
def delete_facilities():
"""Delete all facilities."""
check(delete_documents('facilities'), 200)
@manager.command
def delete_services():
"""Delete all services."""
check(delete_documents('services'), 200)
@manager.command
def delete_requests():
"""Delete all requests."""
check(delete_documents('requests'), 200)
@manager.option("filename", help="CSV file to upload (required)")
@manager.option("--skip", type=int, default=0, help="Skip a number of records")
@manager.option("--limit", type=int, help="Only upload a number of records")
def upload_waterpoints(filename, skip=0, limit=None):
"""Upload waterpoints from a CSV file."""
# Use sys.stdout.write so waterpoints can be printed nicely and succinctly
import sys
date_converter = lambda s: datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
bool_converter = lambda s: s == "T"
status_map = {
"non functional": "not functional",
"functional needs repair": "needs repair"
}
status_converter = lambda s: status_map.get(s.lower(), s.lower())
convert = {
'latitude': float,
'longitude': float,
'gid': int,
'objectid': int,
'valid_from': date_converter,
'valid_to': date_converter,
'amount_tsh': float,
'breakdown_year': int,
'date_recorded': date_converter,
'gps_height': float,
'x_wgs84': float,
'y_wgs84': float,
'num_privcon': int,
'pop_served': int,
'public_meeting': bool_converter,
'construction_year': int,
'status_group': status_converter,
'region_code': int,
'district_code': int,
'ward_code': int
}
def print_flush(msg):
sys.stdout.write(msg)
sys.stdout.flush()
facility_code = "wpf001"
print_every = 1000
print_flush("Adding waterpoints. Please be patient.")
with open(filename, 'rU') as f:
reader = DictReader(f)
for i in range(skip):
reader.next()
for i, d in enumerate(reader):
actual_index = i + skip + 2
do_print = actual_index % print_every == 0
try:
d = dict((k, convert.get(k, str)(v)) for k, v in d.items() if v)
coords = [d.pop('longitude'), d.pop('latitude')]
d['location'] = {'type': 'Point', 'coordinates': coords}
d['facility_code'] = facility_code
if not check(add_document('waterpoints', d), 201, False):
raise Exception()
if do_print:
print_flush(".")
except Exception as e:
print "Error adding waterpoint", e
pprint(d)
exit()
if limit and i >= limit:
break
# Create a 2dsphere index on the location field for geospatial queries
app.data.driver.db['resources'].ensure_index([('location', '2dsphere')])
print "Waterpoints uploaded!"
@manager.command
def ensure_indexes():
"""Make sure all important database indexes are created."""
print "Ensuring resources:location 2dsphere index is created ..."
app.data.driver.db['resources'].ensure_index([('location', '2dsphere')])
print "Done!"
@manager.option("status", help="Status (functional or non functional)")
@manager.option("wp", help="Waterpoint id")
def create_request(wp, status):
"""Create an example request reporting a broken waterpoint"""
r = {"service_code": "wps001",
"attribute": {"waterpoint_id": wp,
"status": status}}
check(add_document("requests", r))
@manager.command
def delete_waterpoints():
"""Delete all existing waterpoints."""
print delete_documents('waterpoints')
if __name__ == "__main__":
manager.run()
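# Typical invocations through Flask-Script (the CSV file name is hypothetical;
# command and option names come from the decorated functions above):
#   python manage.py create_facility
#   python manage.py upload_waterpoints waterpoints.csv --skip 0 --limit 100
#   python manage.py ensure_indexes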
|
apache-2.0
| 7,342,189,801,768,494,000
| 29.172414
| 80
| 0.60781
| false
| 3.947368
| false
| false
| false
|
kevinah95/bmc-sequence-alignment
|
algorithms/needleman_wunsch/plot_nw.py
|
1
|
3189
|
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import datetime
from Bio.Seq import Seq
if __name__ == '__main__':
from needleman_wunsch import needleman_wunsch
else:
from .needleman_wunsch import needleman_wunsch
#-------------------------------
def plot_nw(seq_alpha_col,seq_beta_row,p_penalty):
if not seq_alpha_col or not seq_beta_row:
print("Alguna de las secuencias está vacía.")
return
plt.rcParams["figure.figsize"] = 20, 20
param = {"grid.linewidth": 1.6,
"grid.color": "lightgray",
"axes.linewidth": 1.6,
"axes.edgecolor": "lightgray",
"font.size": 8}
plt.rcParams.update(param)
# Data
headh = seq_alpha_col
headv = seq_beta_row
score_matrix, pt_mat, arrows = needleman_wunsch(seq_alpha_col,seq_beta_row,p_penalty,score_only=False)
# Plot
fig, ax = plt.subplots()
ax.set_xlim(-1.5, score_matrix.shape[1] - .5)
ax.set_ylim(-1.5, score_matrix.shape[0] - .5)
ax.invert_yaxis()
for i in range(score_matrix.shape[0]):
for j in range(score_matrix.shape[1]):
ax.text(j, i, score_matrix[i, j], ha="center", va="center")
for i, l in enumerate(headh):
ax.text(i + 1, -1, l, ha="center", va="center", fontweight="semibold")
for i, l in enumerate(headv):
ax.text(-1, i + 1, l, ha="center", va="center", fontweight="semibold")
ax.xaxis.set_minor_locator(ticker.FixedLocator(
np.arange(-1.5, score_matrix.shape[1] - .5, 1)))
ax.yaxis.set_minor_locator(ticker.FixedLocator(
np.arange(-1.5, score_matrix.shape[1] - .5, 1)))
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left="off", right="off", labelbottom='off', labelleft='off')
#-----------ax.set_aspect('auto')
ax.grid(True, which='minor')
arrowprops = dict(facecolor='blue', alpha=0.5, lw=0,
shrink=0.2, width=2, headwidth=7, headlength=7)
# all path
for i in range(1,pt_mat.shape[0]):
for j in range(1,pt_mat.shape[1]):
if(pt_mat[i][j]['left'] != ''):
ax.annotate("", xy=(j-1,i),
xytext=(j,i), arrowprops=arrowprops)
if(pt_mat[i][j]['diagonal'] != ''):
ax.annotate("", xy=(j-1,i-1),
xytext=(j,i), arrowprops=arrowprops)
if(pt_mat[i][j]['up'] != ''):
ax.annotate("", xy=(j,i-1),
xytext=(j,i), arrowprops=arrowprops)
# optimal path
arrowprops.update(facecolor='crimson')
for i in range(arrows.shape[0]):
ax.annotate("", xy=arrows[i, 2:], # origin
xytext=arrows[i, :2], arrowprops=arrowprops)
#------------
plt.gca().set_aspect('auto')
time = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
plt.savefig("output/needleman_wunsch/output-nw_"+time+".pdf", dpi=600)
#plt.show()
if __name__ == '__main__':
alpha = Seq("ACTCA")
beta = Seq("TTCAT")
penalty = {'MATCH': 1, 'MISMATCH': -1, 'GAP': -2}
plot_nw(alpha,beta,penalty)
|
mit
| -9,171,932,329,394,185,000
| 34.032967
| 106
| 0.548792
| false
| 3.12145
| false
| false
| false
|
cchampet/TuttleOFX
|
doc/scripts/plaintext2html.py
|
1
|
3911
|
#!/usr/bin/env python
from __future__ import with_statement
import re
import cgi
colorcodes = {'bold':{True:'\033[1m',False:'\033[22m'},
'cyan':{True:'\033[1;36m',False:'\033[0;0m'},
#'#8E4429':{True:'\033[1;33m',False:'\033[0;0m'},
'#8E4429':{True:'\033[0;33m',False:'\033[0;0m'},
#'#8E4429':{True:'\033[33m',False:'\033[0;0m'},
'#0000B0':{True:'\033[1;34m',False:'\033[0;0m'},
'#B63A11':{True:'\033[1;31m',False:'\033[0;0m'},
'magenta':{True:'\033[1;35m',False:'\033[0;0m'},
#'green':{True:'\033[1;32m',False:'\033[0;0m'},
'green':{True:'\033[0;32m',False:'\033[0;0m'},
#'green':{True:'\033[32m',False:'\033[0;0m'},
'underline':{True:'\033[1;4m',False:'\033[0;0m'}}
def recolor(color, text):
regexp = "(?:%s)(.*?)(?:%s)" % (colorcodes[color][True], colorcodes[color][False])
regexp = regexp.replace('[', r'\[')
return re.sub(regexp, r'''<span style="color: %s">\1</span>''' % color, text)
def resinglecolor(color, text, intxt):
regexp = "(?:\033\[1;32m%s)(.*?)" % intxt
return re.sub(regexp, r'<span style="color: green">%s\1</span>'% intxt, text)
def removestdcolor(text):
regexp = "(?:\033\[0;0m)(.*?)"
return re.sub(regexp, r'', text)
def bold(text):
regexp = "(?:%s)(.*?)(?:%s)" % (colorcodes['bold'][True], colorcodes['bold'][False])
regexp = regexp.replace('[', r'\[')
return re.sub(regexp, r'<span style="font-weight:bold">\1</span>', text)
def underline(text):
regexp = "(?:%s)(.*?)(?:%s)" % (colorcodes['underline'][True], colorcodes['underline'][False])
regexp = regexp.replace('[', r'\[')
return re.sub(regexp, r'<span style="text-decoration: underline">\1</span>', text)
def removebells(text):
return text.replace('\07', '')
def removebackspaces(text):
backspace_or_eol = r'(.\010)|(\033\[K)'
n = 1
while n > 0:
text, n = re.subn(backspace_or_eol, '', text, 1)
return text
template = '''\
<html>
<head>
</head>
<body>
%s
</body>
</html>
'''
re_string = re.compile(r'(?P<htmlchars>[<&>])|(?P<space>^[ \t]+)|(?P<lineend>\r\n|\r|\n)|(?P<protocal>(^|\s|\[)((http|ftp)://.*?))(\s|$|\])', re.S|re.M|re.I)
def plaintext2html(text, tabstop=4):
def do_sub(m):
c = m.groupdict()
if c['htmlchars']:
return cgi.escape(c['htmlchars'])
if c['lineend']:
return '<br>'
elif c['space']:
t = m.group().replace('\t', ' '*tabstop)
t = t.replace(' ', ' ')
return t
elif c['space'] == '\t':
return ' '*tabstop;
else:
url = m.group('protocal')
#print url
if url.startswith('['):
prefix = '['
suffix = ']'
url = url[1:]
else:
prefix = ''
suffix = ''
last = m.groups()[-1]
if last in ['\n', '\r', '\r\n']:
last = '<br>'
return '%s<a href=%s>%s</a>%s' % (prefix, url, url, suffix)
result = re.sub(re_string, do_sub, text)
result = result.replace(' ', ' ')
result = result.replace('\t', ' '*tabstop)
result = recolor('cyan', result)
result = recolor('#8E4429', result)
result = recolor('#0000B0', result)
result = recolor('#B63A11', result)
result = recolor('magenta', result)
result = recolor('green', result)
result = resinglecolor('green', result, 'Source')
result = resinglecolor('green', result, 'Output')
result = bold(result)
result = underline(result)
result = removebells(result)
result = removebackspaces(result)
result = removestdcolor(result)
return template % result
if __name__ == '__main__':
import sys
with open(sys.argv[-1]) as f:
text = f.read()
print plaintext2html(text)
|
gpl-3.0
| -902,266,013,893,889,500
| 31.591667
| 157
| 0.520327
| false
| 2.996935
| false
| false
| false
|
mantarayforensics/mantaray
|
Tools/Python/extract_ntfs_artifacts_mr.py
|
1
|
15861
|
#!/usr/bin/env python3
#This program extracts NTFS artifacts ($MFT, $LogFile, $UsnJrnl) from overt,
#deleted, and Volume Shadow Copy locations
#Use to extract files when using Triforce ANJP NTFS Journal Parser | Triforce (David Cohen)
#########################COPYRIGHT INFORMATION############################
#Copyright (C) 2013 dougkoster@hotmail.com
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see http://www.gnu.org/licenses/.
#########################COPYRIGHT INFORMATION############################
from easygui import *
from get_case_number import *
from get_output_location import *
from select_file_to_process import *
from parted import *
from mount import *
from mount_ewf import *
from done import *
from unix2dos import *
from remove_dupes_module_noask import *
from mmls import *
from Windows_Time_Converter_module import *
from check_for_folder import *
import os
from os.path import join
import re
import io
import sys
import string
import subprocess
import datetime
import shutil
import struct
### GET BLOCK SIZE ##############################################################################################
def get_block_size_mmls(Image_Path, outfile):
block_size = subprocess.check_output(['mmls -i raw ' + Image_Path + " | grep Units | awk '{print $4}' | sed s/-byte//"], shell=True, universal_newlines=True)
block_size = block_size.strip()
print("The block size is: " + str(block_size))
outfile.write("The block size is: " + str(block_size) + "\n\n")
return block_size
def get_block_size_parted(outfile, temp_time):
block_size_command = "sudo cat /tmp/timeline_partition_info_" + temp_time +".txt | grep -a " + "'"+"Sector size"+"'" + " | awk {'print $4'} | sed s_B/.*__"
outfile.write("The block_size command is: " + block_size_command + "\n")
block_size = subprocess.check_output([block_size_command], shell=True, universal_newlines=True)
block_size = block_size.strip()
print("The block size is: " + str(block_size))
outfile.write("The block size is: " + str(block_size) + "\n\n")
return block_size
### END GET BLOCK SIZE ##########################################################################################
### PROCESS FLS OUTPUT ###### ############################################################
def process_fls_output(value, key, Image_Path, block_size, folder_path, item, file_type, outfile, temp_file):
#divide offset by block size so it is in correct format for fls
key_bytes = int(key)//int(block_size)
#open FLS output file
fls_output_file = open("/tmp/fls_output_ntfs_" + temp_file + ".txt", 'r')
for line in fls_output_file:
#print current line for debugging purposes
#print(line)
newList=[]
#strip carriage returns
line = line.strip()
line_split = line.split('/')
#print(line_split)
for i in line_split:
newList.append(i.split('\t')[0])
#print (newList)
#assign items in newList to variables
inode_number_temp = newList[1]
#strip alpha chars from inode & leading space
inode_number = re.sub('[a-z]','',inode_number_temp)
inode_number = re.sub('^ +','', inode_number)
#get file_name
file_name = newList[-1]
if(item == "NO"):
if(re.search('OrphanFiles', line)):
#copy files out using icat
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_" + file_type +"_DELETED" + "'"
else:
#get user profile name
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_OVERT_" + file_type + "'"
else: #these are the shadow volume files
if(re.search('OrphanFiles', line)):
#copy files out using icat
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_DELETED_" + file_type + "_" + item + "'"
else:
#get user profile name
icat_command = "icat -r -i raw -f " + value + " -o " + str(key_bytes) + " -b " + block_size + " " + Image_Path + " " + inode_number + " > " + "'" + folder_path + "/" + inode_number + "_Partition_" + str(key) + "_" + file_type + "_" + item + "'"
print("File Name: " + file_name.ljust(10) + "\t" "Inode number: " + inode_number.ljust(10))
outfile.write("The icat command is: " + icat_command + "\n")
#run icat command
subprocess.call([icat_command], shell=True)
#close file
fls_output_file.close()
##########################################################################################
### PROCESS OVERT / DELETED HIVES ##############################################################################
def process_overt_deleted_files(value, key, Image_Path, outfile, folder_path, block_size, item, temp_time):
#divide offset by block size so it is in correct format for fls
key_bytes = int(key)//int(block_size)
#run fls to get information for MFT files
fls_command = "fls -Fpr -f ntfs -i raw -o " + str(key_bytes) + " " + Image_Path + " | grep -i '\$MFT$' | sed s/:// | sed s/*// > /tmp/fls_output_ntfs_" + temp_time + ".txt"
#print ("\nThe fls command is: " + fls_command + "\n")
print("\nSearching for $MFT files")
outfile.write("The fls command is: " + fls_command + "\n")
#run fls command
subprocess.call([fls_command], shell=True)
#process fls output
process_fls_output(value, key, Image_Path, block_size, folder_path, item, "MFT", outfile, temp_time)
#run fls to get information for Logfiles files
fls_command = "fls -Fpr -f ntfs -i raw -o " + str(key_bytes) + " " + Image_Path + " | grep -i '\$LogFile$' | sed s/:// | sed s/*// > /tmp/fls_output_ntfs_" + temp_time +".txt"
#print ("\nThe fls command is: " + fls_command + "\n")
print("\nSearching for $LogFiles files")
outfile.write("The fls command is: " + fls_command + "\n")
#run fls command
subprocess.call([fls_command], shell=True)
#process fls output
process_fls_output(value, key, Image_Path, block_size, folder_path, item, "LogFile", outfile, temp_time)
#run fls to get information for $UsrJrnl files
fls_command = "fls -Fpr -f ntfs -i raw -o " + str(key_bytes) + " " + Image_Path + " | grep -i '\$UsnJrnl.\$J$' | sed s/:// | sed s/*// > /tmp/fls_output_ntfs_" + temp_time + ".txt"
#print ("\nThe fls command is: " + fls_command + "\n")
print("\nSearching for $UsrJrnl files")
outfile.write("The fls command is: " + fls_command + "\n")
#run fls command
subprocess.call([fls_command], shell=True)
#process fls output
process_fls_output(value, key, Image_Path, block_size, folder_path, item, "UsnJrnl", outfile, temp_time)
### END PROCESS OVERT / DELETED HIVES ##############################################################################
### CHECK FOR SHADOW VOLUMES ################################################
def check_for_shadow_volumes(Image_Path, key, block_size, outfile, folder_path, temp_time):
#set shadow volume variables
has_shadow_volumes = "NULL"
vssvolume_mnt = "NULL"
#divide offset by block size so it is in correct format for vshadowinfo
key_bytes = int(key)//int(block_size)
key_bytes_disk_offset = int(key) * int(block_size)
image_no_quotes = Image_Path.replace("'","")
print("\nChecking: " + Image_Path + " for shadow volumes")
f = open('/tmp/dump_' + temp_time + '.txt', 'w+t')
try:
vshadow_info_command = "vshadowinfo -v -o " + str(key) + " " + Image_Path# + " > /tmp/dump.txt"
#print("The vshadow_command is: " + vshadow_info_command)
outfile.write("The vshadow_command is: " + vshadow_info_command)
subprocess.call([vshadow_info_command], shell=True, stdout = f, stderr=subprocess.STDOUT)
#vshadow_output = subprocess.check_output([vshadow_info_command], shell=True, stderr=subprocess.STDOUT)
#f.close()
f =open('/tmp/dump_' + temp_time + '.txt', 'rt')
#print("try succedded")
for line in f:
line = line.strip()
print(line)
if (re.search("No Volume Shadow Snapshots found", line)):
has_shadow_volumes = "NO"
if(has_shadow_volumes != "NO"):
print("Partition at offset: " + str(key_bytes) + " has shadow volumes.")
outfile.write("Partition at offset: " + str(key_bytes) + " has shadow volumes.")
#check for existence of folder
vssvolume_mnt = check_for_folder("/mnt/vssvolume", outfile)
#mount shadow volumes for partition
mount_shadow_command = "sudo vshadowmount -o " + str(key) + " " + Image_Path + " " + vssvolume_mnt
print("The mount_shadow_command is: " + mount_shadow_command)
subprocess.call(["sudo vshadowmount -o " + str(key) + " " + Image_Path + " " + vssvolume_mnt], shell=True, stderr=subprocess.STDOUT)
#pass vssvolume mount point to mount_shadow_volume for mounting
mount_shadow_volumes(vssvolume_mnt, outfile, folder_path)
elif(has_shadow_volumes == "NO"):
print("Partition at offset: " + str(key) + " has no shadow volumes")
f.close()
except:
print("The vshadow_info command for partition: " + str(key) + " failed")
return vssvolume_mnt
#############################################################################
#### MOUNT INDIVIDUAL SHADOW VOLUMES ########################################
def mount_shadow_volumes(vssvolume_mnt, outfile, folder_path):
print("Inside mount_shadow_volumes sub")
print("Vssvolume_mnt: " + vssvolume_mnt)
#check for existence of folder
vss_mount = check_for_folder("/mnt/vss_mount", outfile)
vss_volumes = os.listdir(vssvolume_mnt)
print(vss_volumes)
for item in vss_volumes:
print("About to process Shadow Volume: " + item)
#call parted function
partition_info_dict, temp_time = parted(outfile, vssvolume_mnt + "/"+item)
block_size = get_block_size_parted(outfile, temp_time)
for key,value in partition_info_dict.items():
print("About to process registry hives from: " + item)
process_overt_deleted_files(value, key, vssvolume_mnt+"/"+item, outfile, folder_path, block_size, item, temp_time)
#############################################################################
### MAIN PROGRAM ########################################################################################################################
def extract_ntfs_artifacts_mr(item_to_process, case_number, root_folder_path, evidence):
print("The item to process is: " + item_to_process)
print("The case_name is: " + case_number)
print("The output folder is: " + root_folder_path)
print("The evidence to process is: " + evidence)
#get datetime
now = datetime.datetime.now()
#set Mount Point
mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S")
#create output folder path
folder_path = root_folder_path + "/" + "NTFS_Artifacts"
check_for_folder(folder_path, "NONE")
#open a log file for output
log_file = folder_path + "/NTFS_Artifacts_logfile.txt"
outfile = open(log_file, 'wt+')
Image_Path = '"' + evidence + '"'
    #set item variable to tell functions whether data is from shadow volumes
    item = "NO"
    #initialize the shadow volume mount point so the cleanup check below works
    #even when no NTFS/FAT32 partition (and therefore no shadow volume) is found
    vssvolume_mnt = "NULL"
#check if Image file is in Encase format
if re.search(".E01", Image_Path):
#set mount point
mount_point = "/mnt/"+case_number+"_unallocated"
Image_Path = mount_ewf(Image_Path, outfile, mount_point)
#call mmls function
partition_info_dict, temp_time = mmls(outfile, Image_Path)
partition_info_dict_temp = partition_info_dict
#get filesize of mmls_output.txt
file_size = os.path.getsize("/tmp/mmls_output_" + temp_time + ".txt")
#if filesize of mmls output is 0 then run parted
if(file_size == 0):
print("mmls output was empty, running parted\n")
outfile.write("mmls output was empty, running parted\n")
#call parted function
partition_info_dict, temp_time = parted(outfile, Image_Path)
block_size = get_block_size_parted(outfile, temp_time)
else:
#get block_size since mmls was successful
block_size = get_block_size_mmls(Image_Path, outfile)
#read through the mmls output and look for GUID Partition Tables (used on MACS)
mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r')
for line in mmls_output_file:
if re.search("GUID Partition Table", line):
print("We found a GUID partition table, need to use parted")
outfile.write("We found a GUID partition table, need to use parted\n")
#call parted function
partition_info_dict, temp_time = parted(outfile, Image_Path)
#loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY)
for key,value in partition_info_dict.items():
#process overt registy hives
if(value =="ntfs") or (value=="fat32"):
if not os.path.exists(folder_path + "/Partition_" + str(key)):
os.makedirs(folder_path + "/Partition_" + str(key))
#print("Just created output folder: " + folder_path + "/Partition_" + str(key))
outfile.write("Just created output folder: " + folder_path + "/Partition_" + str(key) + "\n\n")
else:
print("Output folder: " + folder_path +"/Partition_" + str(key) + " already exists")
outfile.write("Output folder: " + folder_path +"/Partition_" + str(key) + " already exists\n\n")
process_overt_deleted_files(value, key, Image_Path, outfile, folder_path, block_size, item, temp_time)
vssvolume_mnt = check_for_shadow_volumes(Image_Path, key, block_size, outfile, folder_path, temp_time)
else:
print("This partition is not formatted NTFS or FAT32")
outfile.write("This partition is not formatted NTFS or FAT32\n\n")
#run fdupes against output path to eliminate dupes
remove_dupes_module_noask(folder_path, outfile, str(key))
#chdir to output foler
os.chdir(folder_path)
#unmount shadow volumes
if(vssvolume_mnt != "NULL"):
print("Unmounting: " + vssvolume_mnt)
outfile.write("Unmounting: " + vssvolume_mnt + "\n")
subprocess.call(['sudo umount -f ' + vssvolume_mnt], shell=True)
os.rmdir(vssvolume_mnt)
#unmount and remount points
if re.search(".E01", Image_Path):
if(os.path.exists(mount_point+"_ewf")):
subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
os.rmdir(mount_point+"_ewf")
#remove empty directories
for root, dirs, files in os.walk(folder_path, topdown = False):
for directory in dirs:
dir_path = os.path.join(root, directory)
if not os.listdir(dir_path):
outfile.write("Removing empty folder: " + dir_path + "\n")
os.rmdir(dir_path)
#close outfiles
outfile.close()
#delete temp files
os.remove('/tmp/fls_output_ntfs_' + temp_time + '.txt')
#run text files through unix2dos
for root, dirs, files in os.walk(folder_path):
for filenames in files:
#get file extension
fileName, fileExtension = os.path.splitext(filenames)
if(fileExtension.lower() == ".txt"):
full_path = os.path.join(root,filenames)
quoted_full_path = "'" +full_path+"'"
print("Running Unix2dos against file: " + filenames)
unix2dos_command = "sudo unix2dos " + quoted_full_path
subprocess.call([unix2dos_command], shell=True)
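#Example call (all paths and the case number below are hypothetical):
#   extract_ntfs_artifacts_mr('NTFS Artifacts', 'case_001',
#                             '/cases/case_001/output', '/evidence/disk_image.E01')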
|
gpl-3.0
| 2,019,264,180,146,138,400
| 39.256345
| 256
| 0.617868
| false
| 3.182384
| false
| false
| false
|
vlegoff/tsunami
|
src/secondaires/auberge/editeurs/aubedit/__init__.py
|
1
|
3826
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur d'auberge 'aubedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package
Note importante : ce package contient la définition d'un éditeur, mais
celui-ci peut très bien être étendu par d'autres modules. Au quel cas,
les extensions n'apparaîtront pas ici.
"""
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.uniligne import Uniligne
from secondaires.auberge.editeurs.aubedit.edt_chambres import EdtChambres
class EdtAubedit(Presentation):
"""Classe définissant l'éditeur d'auberge."""
nom = "aubedit"
def __init__(self, personnage, auberge):
"""Constructeur de l'éditeur"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, auberge)
if personnage and auberge:
self.construire(auberge)
def __getnewargs__(self):
return (None, None)
def construire(self, auberge):
"""Construction de l'éditeur"""
# Titre
titre = self.ajouter_choix("titre", "t", Uniligne, auberge, "titre")
titre.parent = self
titre.prompt = "Titre de l'auberge : "
titre.apercu = "{objet.titre}"
titre.aide_courte = \
"Entrez le |ent|titre|ff| de l'auberge ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nTitre actuel : " \
"|bc|{objet.titre}|ff|"
        # Innkeeper key
cle = self.ajouter_choix("clé de l'aubergiste", "a", Uniligne,
auberge, "cle_aubergiste")
cle.parent = self
cle.prompt = "Clé du prototype de l'aubergiste : "
cle.apercu = "{objet.cle_aubergiste}"
cle.aide_courte = \
"Entrez la |ent|clé de l'aubergiste|ff| ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nClé actuelle : " \
"|bc|{objet.cle_aubergiste}|ff|"
        # Rooms
chambres = self.ajouter_choix("chambres", "c", EdtChambres, auberge)
chambres.parent = self
chambres.apercu = "\n{objet.aff_chambres}"
|
bsd-3-clause
| -3,694,249,358,320,712,000
| 40.326087
| 79
| 0.694108
| false
| 3.364602
| false
| false
| false
|
fkie-cad/FACT_core
|
src/storage/db_interface_compare.py
|
1
|
4915
|
import logging
from contextlib import suppress
from time import time
from typing import List, Optional
from pymongo.errors import PyMongoError
from helperFunctions.data_conversion import (
convert_compare_id_to_list, convert_uid_list_to_compare_id, normalize_compare_id
)
from storage.db_interface_common import MongoInterfaceCommon
class FactCompareException(Exception):
def get_message(self):
if self.args: # pylint: disable=using-constant-test
return self.args[0] # pylint: disable=unsubscriptable-object
return ''
class CompareDbInterface(MongoInterfaceCommon):
def _setup_database_mapping(self):
super()._setup_database_mapping()
self.compare_results = self.main.compare_results
def add_compare_result(self, compare_result):
compare_result['_id'] = self._calculate_compare_result_id(compare_result)
compare_result['submission_date'] = time()
with suppress(PyMongoError):
self.compare_results.delete_one({'_id': compare_result['_id']})
self.compare_results.insert_one(compare_result)
logging.info('compare result added to db: {}'.format(compare_result['_id']))
def get_compare_result(self, compare_id: str) -> Optional[dict]:
compare_id = normalize_compare_id(compare_id)
self.check_objects_exist(compare_id)
compare_result = self.compare_results.find_one(compare_id)
if compare_result:
logging.debug('got compare result from db: {}'.format(compare_id))
return compare_result
logging.debug('compare result not found in db: {}'.format(compare_id))
return None
def check_objects_exist(self, compare_id, raise_exc=True):
for uid in convert_compare_id_to_list(compare_id):
if not self.exists(uid):
if raise_exc:
raise FactCompareException('{} not found in database'.format(uid))
return True
return False
def compare_result_is_in_db(self, compare_id):
compare_result = self.compare_results.find_one(normalize_compare_id(compare_id))
return bool(compare_result)
def delete_old_compare_result(self, compare_id):
try:
self.compare_results.remove({'_id': normalize_compare_id(compare_id)})
logging.debug('old compare result deleted: {}'.format(compare_id))
except Exception as exception:
logging.warning('Could not delete old compare result: {} {}'.format(type(exception).__name__, exception))
@staticmethod
def _calculate_compare_result_id(compare_result):
general_dict = compare_result['general']
uid_set = set()
for key in general_dict:
uid_set.update(list(general_dict[key].keys()))
comp_id = convert_uid_list_to_compare_id(list(uid_set))
return comp_id
def page_compare_results(self, skip=0, limit=0):
db_entries = self.compare_results.find({'submission_date': {'$gt': 1}}, {'general.hid': 1, 'submission_date': 1}, skip=skip, limit=limit, sort=[('submission_date', -1)])
all_previous_results = [(item['_id'], item['general']['hid'], item['submission_date']) for item in db_entries]
return [
compare
for compare in all_previous_results
if self._all_objects_are_in_db(compare[0])
]
def _all_objects_are_in_db(self, compare_id):
try:
self.check_objects_exist(compare_id)
return True
except FactCompareException:
return False
def get_total_number_of_results(self):
db_entries = self.compare_results.find({'submission_date': {'$gt': 1}}, {'_id': 1})
return len([1 for entry in db_entries if not self.check_objects_exist(entry['_id'], raise_exc=False)])
def get_ssdeep_hash(self, uid):
file_object_entry = self.file_objects.find_one({'_id': uid}, {'processed_analysis.file_hashes.ssdeep': 1})
return file_object_entry['processed_analysis']['file_hashes']['ssdeep'] if 'file_hashes' in file_object_entry['processed_analysis'] else None
def get_entropy(self, uid):
file_object_entry = self.file_objects.find_one({'_id': uid}, {'processed_analysis.unpacker.entropy': 1})
return file_object_entry['processed_analysis']['unpacker']['entropy'] if 'unpacker' in file_object_entry['processed_analysis'] and 'entropy' in file_object_entry['processed_analysis']['unpacker'] else 0.0
def get_exclusive_files(self, compare_id: str, root_uid: str) -> List[str]:
if compare_id is None or root_uid is None:
return []
try:
result = self.get_compare_result(compare_id)
exclusive_files = result['plugins']['File_Coverage']['exclusive_files'][root_uid]
except (KeyError, FactCompareException):
exclusive_files = []
return exclusive_files
|
gpl-3.0
| 2,370,626,291,600,637,000
| 44.091743
| 212
| 0.647202
| false
| 3.885375
| false
| false
| false
|
d2chau/git-demo-fizzbuzz
|
python-fizz-buzz/fizzbuzz.py
|
1
|
1059
|
def basicfizzbuzz(n):
if n % 3 == 0 and n % 5 == 0:
return 'FizzBuzz'
elif n % 3 == 0:
return 'Fizz'
elif n % 5 == 0:
return 'Buzz'
else:
return str(n)
print "\n".join(basicfizzbuzz(n) for n in xrange(1, 100))
print "\n"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "****************************************************************************"
print "\n"
def fizzbuzz(n):
    tempStr = ""
    # Iterate the rules in a fixed order so multiples of 15 always yield 'FizzBuzz'
    rules = [('Fizz', 3), ('Buzz', 5)]
    for key, value in rules:
        if n % value == 0:
            tempStr = tempStr + key
    if not tempStr:
        tempStr = str(n)
    return tempStr
print "\n".join(fizzbuzz(n) for n in xrange(1, 100))
|
apache-2.0
| -1,047,292,258,414,213,900
| 28.416667
| 84
| 0.313503
| false
| 4.202381
| false
| false
| false
|