""" Model definition functions and weight loading.
"""
from __future__ import print_function, division
from keras.models import Model, Sequential
from keras.layers.merge import concatenate
from keras.layers import Input, Bidirectional, Embedding, Dense, Dropout, SpatialDropout1D, LSTM, Activation
from keras.regularizers import L1L2
from attlayer import AttentionWeightedAverage
from global_variables import NB_TOKENS, NB_EMOJI_CLASSES
import numpy as np
from copy import deepcopy
from os.path import exists
import h5py
def deepmoji_feature_encoding(maxlen, weight_path, return_attention=False):
""" Loads the pretrained DeepMoji model for extracting features
from the penultimate feature layer. In this way, it transforms
the text into its emotional encoding.
# Arguments:
maxlen: Maximum length of a sentence (given in tokens).
weight_path: Path to model weights to be loaded.
return_attention: If True, the output will also include the attention
weight of each input token used for the prediction.
# Returns:
Pretrained model for encoding text into feature vectors.
"""
model = deepmoji_architecture(nb_classes=None, nb_tokens=NB_TOKENS,
maxlen=maxlen, feature_output=True,
return_attention=return_attention)
load_specific_weights(model, weight_path, exclude_names=['softmax'])
return model
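# Example usage (a minimal sketch; the weight path and the tokenized input are
# assumptions, not part of this module):
#
#   model = deepmoji_feature_encoding(maxlen=30,
#                                     weight_path='model/deepmoji_weights.hdf5')
#   # `tokens` is an int32 array of shape (n_sentences, 30) from a tokenizer
#   encodings = model.predict(tokens)  # 2304-dim attention-weighted feature vectors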
def deepmoji_emojis(maxlen, weight_path, return_attention=False):
""" Loads the pretrained DeepMoji model for extracting features
from the penultimate feature layer. In this way, it transforms
the text into its emotional encoding.
# Arguments:
maxlen: Maximum length of a sentence (given in tokens).
weight_path: Path to model weights to be loaded.
return_attention: If True, the output will also include the attention
weight of each input token used for the prediction.
# Returns:
Pretrained model for predicting emoji class probabilities from text.
"""
model = deepmoji_architecture(nb_classes=NB_EMOJI_CLASSES,
nb_tokens=NB_TOKENS, maxlen=maxlen,
return_attention=return_attention)
model.load_weights(weight_path, by_name=False)
return model
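# Example usage (sketch; the weight path is an assumption):
#
#   model = deepmoji_emojis(maxlen=30, weight_path='model/deepmoji_weights.hdf5')
#   probs = model.predict(tokens)          # shape (n_sentences, NB_EMOJI_CLASSES)
#   top5 = probs.argsort(axis=1)[:, -5:]   # indices of the most probable emojis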
def deepmoji_transfer(nb_classes, maxlen, weight_path=None, extend_embedding=0,
embed_dropout_rate=0.25, final_dropout_rate=0.5,
embed_l2=1E-6):
""" Loads the pretrained DeepMoji model for finetuning/transfer learning.
Does not load weights for the softmax layer.
Note that if you are planning to use class average F1 for evaluation,
nb_classes should be set to 2 instead of the actual number of classes
in the dataset, since binary classification will be performed on each
class individually.
Note that for the 'new' method, weight_path should be left as None.
# Arguments:
nb_classes: Number of classes in the dataset.
maxlen: Maximum length of a sentence (given in tokens).
weight_path: Path to model weights to be loaded.
extend_embedding: Number of tokens that have been added to the
vocabulary on top of NB_TOKENS. If this number is larger than 0,
the embedding layer's dimensions are adjusted accordingly, with the
additional weights being set to random values.
embed_dropout_rate: Dropout rate for the embedding layer.
final_dropout_rate: Dropout rate for the final Softmax layer.
embed_l2: L2 regularization for the embedding layer.
# Returns:
Model with the given parameters.
"""
model = deepmoji_architecture(nb_classes=nb_classes,
nb_tokens=NB_TOKENS + extend_embedding,
maxlen=maxlen, embed_dropout_rate=embed_dropout_rate,
final_dropout_rate=final_dropout_rate, embed_l2=embed_l2)
if weight_path is not None:
load_specific_weights(model, weight_path,
exclude_names=['softmax'],
extend_embedding=extend_embedding)
return model
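# Example finetuning setup (sketch; paths, data and hyperparameters are
# assumptions and depend on the finetuning pipeline used):
#
#   model = deepmoji_transfer(nb_classes=2, maxlen=30,
#                             weight_path='model/deepmoji_weights.hdf5')
#   model.compile(loss='binary_crossentropy', optimizer='adam',
#                 metrics=['accuracy'])
#   model.fit(X_train, y_train, epochs=5, batch_size=32)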
def deepmoji_architecture(nb_classes, nb_tokens, maxlen, feature_output=False, embed_dropout_rate=0, final_dropout_rate=0, embed_l2=1E-6, return_attention=False):
"""
Returns the DeepMoji architecture uninitialized and
without using the pretrained model weights.
# Arguments:
nb_classes: Number of classes in the dataset.
nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).
maxlen: Maximum length of a sentence (given in tokens).
feature_output: If True the model returns the penultimate
feature vector rather than Softmax probabilities
(defaults to False).
embed_dropout_rate: Dropout rate for the embedding layer.
final_dropout_rate: Dropout rate for the final Softmax layer.
embed_l2: L2 regularization for the embedding layer.
# Returns:
Model with the given parameters.
"""
# define embedding layer that turns word tokens into vectors
# an activation function is used to bound the values of the embedding
model_input = Input(shape=(maxlen,), dtype='int32')
embed_reg = L1L2(l2=embed_l2) if embed_l2 != 0 else None
embed = Embedding(input_dim=nb_tokens,
output_dim=256,
mask_zero=True,
input_length=maxlen,
embeddings_regularizer=embed_reg,
name='embedding')
x = embed(model_input)
x = Activation('tanh')(x)
# entire embedding channels are dropped out instead of the
# normal Keras embedding dropout, which drops all channels for entire words
# many of the datasets contain so few words that losing one or more words can alter the emotions completely
if embed_dropout_rate != 0:
embed_drop = SpatialDropout1D(embed_dropout_rate, name='embed_drop')
x = embed_drop(x)
# skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
# ordering of the way the merge is done is important for consistency with the pretrained model
lstm_0_output = Bidirectional(LSTM(512, return_sequences=True), name="bi_lstm_0")(x)
lstm_1_output = Bidirectional(LSTM(512, return_sequences=True), name="bi_lstm_1")(lstm_0_output)
x = concatenate([lstm_1_output, lstm_0_output, x])
# if return_attention is True in AttentionWeightedAverage, an additional tensor
# representing the weight at each timestep is returned
weights = None
x = AttentionWeightedAverage(name='attlayer', return_attention=return_attention)(x)
if return_attention:
x, weights = x
if not feature_output:
# output class probabilities
if final_dropout_rate != 0:
x = Dropout(final_dropout_rate)(x)
if nb_classes > 2:
outputs = [Dense(nb_classes, activation='softmax', name='softmax')(x)]
else:
outputs = [Dense(1, activation='sigmoid', name='softmax')(x)]
else:
# output penultimate feature vector
outputs = [x]
if return_attention:
# add the attention weights to the outputs if required
outputs.append(weights)
return Model(inputs=[model_input], outputs=outputs, name="DeepMoji")
def load_specific_weights(model, weight_path, exclude_names=[], extend_embedding=0, verbose=True):
""" Loads model weights from the given file path, excluding any
given layers.
# Arguments:
model: Model whose weights should be loaded.
weight_path: Path to file containing model weights.
exclude_names: List of layer names whose weights should not be loaded.
extend_embedding: Number of new words being added to vocabulary.
verbose: Verbosity flag.
# Raises:
ValueError if the file at weight_path does not exist.
"""
if not exists(weight_path):
raise ValueError('ERROR (load_weights): The weights file at {} does '
'not exist. Refer to the README for instructions.'
.format(weight_path))
if extend_embedding and 'embedding' in exclude_names:
raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '
'without loading the embedding weights.')
# Copy only weights from the temporary model that are wanted
# for the specific task (e.g. the Softmax is often ignored)
layer_weights = get_weights_from_hdf5(weight_path)
for i, w in enumerate(layer_weights):
l_name = w[0]
weight_names = w[1]
weight_values = w[2]
if l_name in exclude_names:
if verbose:
print('Ignoring weights for {}'.format(l_name))
continue
try:
model_l = model.get_layer(name=l_name)
except ValueError:
raise ValueError("Weights had layer {},".format(l_name) +
" but could not find this layer in model.")
if verbose:
print('Loading weights for {}'.format(l_name))
# extend embedding layer to allow new randomly initialized words
# if requested. Otherwise, just load the weights for the layer.
if type(model_l) is Embedding and extend_embedding > 0:
comb_weights = append_to_embedding(weight_values,
model_l.get_weights())
model_l.set_weights(comb_weights)
if verbose:
print('Extended vocabulary for embedding layer ' +
'from {} to {} tokens.'.format(
NB_TOKENS, NB_TOKENS + extend_embedding))
else:
model_l.set_weights(weight_values)
def append_to_embedding(pretrain_weights, random_init_weights):
""" Uses pretrained weights for the tokens already in the vocabulary.
Remaining weights will be left with the random initialization. """
pretrain_weights = deepcopy(pretrain_weights)
if type(pretrain_weights) == list:
pretrain_weights = pretrain_weights[0]
if type(random_init_weights) == list:
random_init_weights = random_init_weights[0]
nb_old_tokens = np.shape(pretrain_weights)[0]
random_init_weights[:nb_old_tokens] = pretrain_weights
# must be returned as a list to be properly inserted into Keras model
return [random_init_weights]
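# Shape sketch (token counts are illustrative): with NB_TOKENS pretrained rows and
# `extend_embedding` freshly initialized rows, only the leading rows are overwritten:
#
#   pretrained = np.random.rand(50000, 256)   # NB_TOKENS x embedding dim
#   extended = np.random.rand(50010, 256)     # 10 extra vocabulary entries
#   combined = append_to_embedding(pretrained, extended)
#   # combined[0][:50000] equals `pretrained`; the last 10 rows keep their random values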
def get_weights_from_hdf5(filepath):
""" Loads the weights from a saved Keras model into numpy arrays.
The weights are saved using Keras 2.0 so we don't need all the
conversion functionality for handling old weights.
"""
with h5py.File(filepath, mode='r') as f:
layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
layer_weights = []
for k, l_name in enumerate(layer_names):
g = f[l_name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
weight_values = [g[weight_name][:] for weight_name in weight_names]
if len(weight_values):
layer_weights.append([l_name, weight_names, weight_values])
return layer_weights
#
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
# Import system modules
import sys
# Import Numpy and Obspy
import numpy as np
import obspy
# Local imports
from seisflows.tools import msg, unix
from seisflows.tools.tools import exists, getset
from seisflows.config import ParameterError
from seisflows.plugins import adjoint, misfit, readers, writers
from seisflows.tools import signal
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
class base(object):
""" Data preprocessing class
Provides data processing functions for seismic traces, with options for
data misfit, filtering, normalization and muting
"""
def check(self):
""" Checks parameters and paths
"""
# used for inversion
if 'MISFIT' not in PAR:
setattr(PAR, 'MISFIT', None)
# used for migration
if 'BACKPROJECT' not in PAR:
setattr(PAR, 'BACKPROJECT', None)
# data file format
if 'FORMAT' not in PAR:
raise ParameterError(PAR, 'FORMAT')
# data normalization option
if 'NORMALIZE' not in PAR:
setattr(PAR, 'NORMALIZE', None)
# data muting option
if 'MUTE' not in PAR:
setattr(PAR, 'MUTE', None)
# data filtering option
if 'FILTER' not in PAR:
setattr(PAR, 'FILTER', None)
# assertions
if PAR.FORMAT not in dir(readers):
print(msg.ReaderError)
raise ParameterError()
if PAR.FORMAT not in dir(writers):
print(msg.WriterError)
raise ParameterError()
self.check_filter()
self.check_mute()
self.check_normalize()
def setup(self):
""" Sets up data preprocessing machinery
"""
# define misfit function and adjoint trace generator
if PAR.MISFIT:
self.misfit = getattr(misfit, PAR.MISFIT)
self.adjoint = getattr(adjoint, PAR.MISFIT)
elif PAR.BACKPROJECT:
self.adjoint = getattr(adjoint, PAR.BACKPROJECT)
# define seismic data reader and writer
self.reader = getattr(readers, PAR.FORMAT)
self.writer = getattr(writers, PAR.FORMAT)
def prepare_eval_grad(self, path='.'):
"""
Prepares solver for gradient evaluation by writing residuals and
adjoint traces
:input path: directory containing observed and synthetic seismic data
"""
solver = sys.modules['seisflows_solver']
for filename in solver.data_filenames:
obs = self.reader(path+'/'+'traces/obs', filename)
syn = self.reader(path+'/'+'traces/syn', filename)
# process observations
obs = self.apply_filter(obs)
obs = self.apply_mute(obs)
obs = self.apply_normalize(obs)
# process synthetics
syn = self.apply_filter(syn)
syn = self.apply_mute(syn)
syn = self.apply_normalize(syn)
if PAR.MISFIT:
self.write_residuals(path, syn, obs)
self.write_adjoint_traces(path+'/'+'traces/adj', syn, obs,
filename)
def write_residuals(self, path, syn, obs):
"""
Computes residuals
:input path: location "adjoint traces" will be written
:input syn: obspy Stream object containing synthetic data
:input obs: obspy Stream object containing observed data
"""
nt, dt, _ = self.get_time_scheme(syn)
nn, _ = self.get_network_size(syn)
residuals = []
for ii in range(nn):
residuals.append(self.misfit(syn[ii].data, obs[ii].data, nt, dt))
filename = path+'/'+'residuals'
if exists(filename):
residuals.extend(list(np.loadtxt(filename)))
np.savetxt(filename, residuals)
def sum_residuals(self, files):
"""
Sums squares of residuals
:input files: list of single-column text files containing residuals
:output total_misfit: sum of squares of residuals
"""
total_misfit = 0.
for filename in files:
total_misfit += np.sum(np.loadtxt(filename)**2.)
return total_misfit
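# Example: two residual files containing [1., 2.] and [3.] give a total misfit of
# 1**2 + 2**2 + 3**2 = 14.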
def write_adjoint_traces(self, path, syn, obs, channel):
"""
Writes "adjoint traces" required for gradient computation
:input path: location "adjoint traces" will be written
:input syn: obspy Stream object containing synthetic data
:input obs: obspy Stream object containing observed data
:input channel: channel or component code used by writer
"""
nt, dt, _ = self.get_time_scheme(syn)
nn, _ = self.get_network_size(syn)
adj = syn
for ii in range(nn):
adj[ii].data = self.adjoint(syn[ii].data, obs[ii].data, nt, dt)
self.writer(adj, path, channel)
# Signal processing
def apply_filter(self, traces):
if not PAR.FILTER:
return traces
elif PAR.FILTER == 'Bandpass':
for tr in traces:
tr.detrend('demean')
tr.detrend('linear')
tr.taper(0.05, type='hann')
tr.filter('bandpass',
zerophase=True,
freqmin=PAR.FREQMIN,
freqmax=PAR.FREQMAX)
elif PAR.FILTER == 'Lowpass':
for tr in traces:
tr.detrend('demean')
tr.detrend('linear')
tr.taper(0.05, type='hann')
tr.filter('lowpass',
zerophase=True,
freq=PAR.FREQ)
elif PAR.FILTER == 'Highpass':
for tr in traces:
tr.detrend('demean')
tr.detrend('linear')
tr.taper(0.05, type='hann')
tr.filter('highpass',
zerophase=True,
freq=PAR.FREQ)
else:
raise ParameterError()
return traces
def apply_mute(self, traces):
if not PAR.MUTE:
return traces
if 'MuteEarlyArrivals' in PAR.MUTE:
traces = signal.mute_early_arrivals(traces,
PAR.MUTE_EARLY_ARRIVALS_SLOPE, # (units: time/distance)
PAR.MUTE_EARLY_ARRIVALS_CONST, # (units: time)
self.get_time_scheme(traces),
self.get_source_coords(traces),
self.get_receiver_coords(traces))
if 'MuteLateArrivals' in PAR.MUTE:
traces = signal.mute_late_arrivals(traces,
PAR.MUTE_LATE_ARRIVALS_SLOPE, # (units: time/distance)
PAR.MUTE_LATE_ARRIVALS_CONST, # (units: time)
self.get_time_scheme(traces),
self.get_source_coords(traces),
self.get_receiver_coords(traces))
if 'MuteShortOffsets' in PAR.MUTE:
traces = signal.mute_short_offsets(traces,
PAR.MUTE_SHORT_OFFSETS_DIST,
self.get_source_coords(traces),
self.get_receiver_coords(traces))
if 'MuteLongOffsets' in PAR.MUTE:
traces = signal.mute_long_offsets(traces,
PAR.MUTE_LONG_OFFSETS_DIST,
self.get_source_coords(traces),
self.get_receiver_coords(traces))
return traces
def apply_normalize(self, traces):
if not PAR.NORMALIZE:
return traces
if 'NormalizeEventsL1' in PAR.NORMALIZE:
# normalize event by L1 norm of all traces
w = 0.
for tr in traces:
w += np.linalg.norm(tr.data, ord=1)
for tr in traces:
tr.data /= w
elif 'NormalizeEventsL2' in PAR.NORMALIZE:
# normalize event by L2 norm of all traces
w = 0.
for tr in traces:
w += np.linalg.norm(tr.data, ord=2)
for tr in traces:
tr.data /= w
if 'NormalizeTracesL1' in PAR.NORMALIZE:
# normalize each trace by its L1 norm
for tr in traces:
w = np.linalg.norm(tr.data, ord=1)
if w > 0:
tr.data /= w
elif 'NormalizeTracesL2' in PAR.NORMALIZE:
# normalize each trace by its L2 norm
for tr in traces:
w = np.linalg.norm(tr.data, ord=2)
if w > 0:
tr.data /= w
return traces
def apply_filter_backwards(self, traces):
for tr in traces:
tr.data = np.flip(tr.data)
traces = self.apply_filter(traces)
for tr in traces:
tr.data = np.flip(tr.data)
return traces
# Additional parameter checking
def check_filter(self):
""" Checks filter settings
"""
assert getset(PAR.FILTER) < set([
'Bandpass',
'Lowpass',
'Highpass'])
if PAR.FILTER == 'Bandpass':
if 'FREQMIN' not in PAR:
raise ParameterError('FREQMIN')
if 'FREQMAX' not in PAR:
raise ParameterError('FREQMAX')
assert 0 < PAR.FREQMIN
assert PAR.FREQMIN < PAR.FREQMAX
assert PAR.FREQMAX < np.inf
elif PAR.FILTER == 'Lowpass':
raise NotImplementedError
if 'FREQ' not in PAR:
raise ParameterError('FREQ')
assert 0 < PAR.FREQ <= np.inf
elif PAR.FILTER == 'Highpass':
raise NotImplementedError
if 'FREQ' not in PAR:
raise ParameterError('FREQ')
assert 0 <= PAR.FREQ < np.inf
def check_mute(self):
""" Checks mute settings
"""
if not PAR.MUTE:
return
assert getset(PAR.MUTE) <= set([
'MuteEarlyArrivals',
'MuteLateArrivals',
'MuteShortOffsets',
'MuteLongOffsets'])
if 'MuteEarlyArrivals' in PAR.MUTE:
assert 'MUTE_EARLY_ARRIVALS_SLOPE' in PAR
assert 'MUTE_EARLY_ARRIVALS_CONST' in PAR
assert PAR.MUTE_EARLY_ARRIVALS_SLOPE >= 0.
if 'MuteLateArrivals' in PAR.MUTE:
assert 'MUTE_LATE_ARRIVALS_SLOPE' in PAR
assert 'MUTE_LATE_ARRIVALS_CONST' in PAR
assert PAR.MUTE_LATE_ARRIVALS_SLOPE >= 0.
if 'MuteShortOffsets' in PAR.MUTE:
assert 'MUTE_SHORT_OFFSETS_DIST' in PAR
assert 0 < PAR.MUTE_SHORT_OFFSETS_DIST
if 'MuteLongOffsets' in PAR.MUTE:
assert 'MUTE_LONG_OFFSETS_DIST' in PAR
assert 0 < PAR.MUTE_LONG_OFFSETS_DIST
if 'MuteShortOffsets' not in PAR.MUTE:
setattr(PAR, 'MUTE_SHORT_OFFSETS_DIST', 0.)
if 'MuteLongOffsets' not in PAR.MUTE:
setattr(PAR, 'MUTE_LONG_OFFSETS_DIST', 0.)
def check_normalize(self):
assert getset(PAR.NORMALIZE) < set([
'NormalizeTracesL1',
'NormalizeTracesL2',
'NormalizeEventsL1',
'NormalizeEventsL2'])
# Utility functions
def get_time_scheme(self, traces):
""" FIXME: extract time scheme from trace headers rather than
parameters file.
Note from Alexis Bottero : it is actually better like this in
my opinion because this allows for longer traces to be processed.
Indeed, in su format only 2 bytes are dedicated to the number of
samples which is supposed to be stored as an unsigned int. The
maximum NT which can be stored in the header is then 32762 whereas
there is no limit in principle.
"""
nt = PAR.NT
dt = PAR.DT
t0 = 0.
return nt, dt, t0
def get_network_size(self, traces):
nrec = len(traces)
nsrc = 1
return nrec, nsrc
def get_receiver_coords(self, traces):
if PAR.FORMAT in ['SU', 'su']:
rx = []
ry = []
rz = []
for trace in traces:
rx += [trace.stats.su.trace_header.group_coordinate_x]
ry += [trace.stats.su.trace_header.group_coordinate_y]
rz += [0.]
return rx, ry, rz
else:
raise NotImplementedError
def get_source_coords(self, traces):
if PAR.FORMAT in ['SU', 'su']:
sx = []
sy = []
sz = []
for trace in traces:
sx += [trace.stats.su.trace_header.source_coordinate_x]
sy += [trace.stats.su.trace_header.source_coordinate_y]
sz += [0.]
return sx, sy, sz
else:
raise NotImplementedError
from ..titanic import digital
from ..titanic import gmpmath
from ..titanic.ops import OP
class MPNum(digital.Digital):
# must be implemented in subclasses
@classmethod
def _select_context(cls, *args, ctx=None):
raise ValueError('virtual method: unimplemented')
@classmethod
def _round_to_context(cls, unrounded, ctx=None, strict=False):
raise ValueError('virtual method: unimplemented')
# most operations
def add(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.add, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sub(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.sub, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def mul(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.mul, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def div(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.div, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sqrt(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.sqrt, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def fma(self, other1, other2, ctx=None):
ctx = self._select_context(self, other1, other2, ctx=ctx)
result = gmpmath.compute(OP.fma, self, other1, other2, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def neg(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.neg, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def copysign(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.copysign, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def fabs(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.fabs, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def fdim(self, other, ctx=None):
# emulated
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.sub, self, other, prec=ctx.p)
zero = digital.Digital(negative=False, c=0, exp=0)
if result < zero:
return type(self)(negative=False, c=0, exp=0, inexact=False, rc=0)
else:
# never return negative zero
rounded = self._round_to_context(result, ctx=ctx, strict=True)
return type(self)(rounded, negative=False)
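# Example: fdim emulates max(x - y, 0) under the selected context; for x=5, y=3
# it returns 2 (rounded to the context), while for x=3, y=5 it returns +0 rather
# than a negative zero.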
def fmax(self, other, ctx=None):
# emulated
ctx = self._select_context(self, other, ctx=ctx)
if self.isnan:
return self._round_to_context(other, ctx=ctx, strict=False)
elif other.isnan:
return self._round_to_context(self, ctx=ctx, strict=False)
else:
return self._round_to_context(max(self, other), ctx=ctx, strict=False)
def fmin(self, other, ctx=None):
# emulated
ctx = self._select_context(self, other, ctx=ctx)
if self.isnan:
return self._round_to_context(other, ctx=ctx, strict=False)
elif other.isnan:
return self._round_to_context(self, ctx=ctx, strict=False)
else:
return self._round_to_context(min(self, other), ctx=ctx, strict=False)
def fmod(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.fmod, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def remainder(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.remainder, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def ceil(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.ceil, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def floor(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.floor, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def nearbyint(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.nearbyint, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def round(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.round, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def trunc(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.trunc, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def acos(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.acos, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def acosh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.acosh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def asin(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.asin, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def asinh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.asinh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def atan(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.atan, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def atan2(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.atan2, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def atanh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.atanh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def cos(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.cos, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def cosh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.cosh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sin(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.sin, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def sinh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.sinh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def tan(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.tan, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def tanh(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.tanh, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def exp_(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.exp, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def exp2(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.exp2, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def expm1(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.expm1, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log10(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log10, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log1p(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log1p, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def log2(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.log2, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def cbrt(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.cbrt, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def hypot(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
result = gmpmath.compute(OP.hypot, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def pow(self, other, ctx=None):
ctx = self._select_context(self, other, ctx=ctx)
if other.is_zero():
# avoid possibly passing nan to gmpmath.compute
return type(self)(negative=False, c=1, exp=0, inexact=False, rc=0)
result = gmpmath.compute(OP.pow, self, other, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def erf(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.erf, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def erfc(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.erfc, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def lgamma(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.lgamma, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def tgamma(self, ctx=None):
ctx = self._select_context(self, ctx=ctx)
result = gmpmath.compute(OP.tgamma, self, prec=ctx.p)
return self._round_to_context(result, ctx=ctx, strict=True)
def isfinite(self):
return not (self.isinf or self.isnan)
# isinf and isnan are properties
# isnormal is implementation specific - override if necessary
def isnormal(self):
return not (
self.is_zero()
or self.isinf
or self.isnan
)
def signbit(self):
return self.negative
import logging
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils.encoding import smart_unicode
from restlib2.http import codes
from restlib2.params import StrParam, IntParam, BoolParam
from modeltree.tree import MODELTREE_DEFAULT_ALIAS, trees
from avocado.events import usage
from avocado.query import pipeline
from .base import FieldBase, is_field_orphaned
from ..pagination import PaginatorResource, PaginatorParametizer
from ...links import patch_response, reverse_tmpl
log = logging.getLogger(__name__)
class FieldValuesParametizer(PaginatorParametizer):
aware = BoolParam(False)
limit = IntParam(10)
tree = StrParam(MODELTREE_DEFAULT_ALIAS, choices=trees)
processor = StrParam('default', choices=pipeline.query_processors)
query = StrParam()
random = IntParam()
class FieldValues(FieldBase, PaginatorResource):
"""Field Values Resource
This resource can be overridden for any field to use a more
performant search implementation.
"""
parametizer = FieldValuesParametizer
def get_base_values(self, request, instance, params):
"Returns the base queryset for this field."
# The `aware` flag toggles whether the distribution is made relative to the
# applied context or to no context at all
if params['aware']:
context = self.get_context(request)
else:
context = self.get_context(request, attrs={})
return context.apply(queryset=instance.model.objects.all())
def get_all_values(self, request, instance, queryset):
"Returns all distinct values for this field."
results = []
for value, label in instance.choices(queryset=queryset):
results.append({
'label': label,
'value': value,
})
return results
def get_search_values(self, request, instance, query, queryset):
"""
Performs a search on the underlying data for a field.
This method can be overridden to use an alternate search
implementation.
"""
results = []
value_labels = instance.value_labels(queryset=queryset)
for value in instance.search(query, queryset=queryset):
results.append({
'label': value_labels.get(value, smart_unicode(value)),
'value': value,
})
return results
def get_random_values(self, request, instance, random, queryset):
"""
Returns a random set of value/label pairs.
This is useful for pre-populating documents or form fields with
example data.
"""
values = instance.random(random, queryset=queryset)
results = []
for value in values:
results.append({
'label': instance.get_label(value, queryset=queryset),
'value': value,
})
return results
def get_link_templates(self, request):
uri = request.build_absolute_uri
return {
'parent': reverse_tmpl(
uri, 'serrano:field', {'pk': (int, 'parent_id')})
}
def get(self, request, pk):
instance = self.get_object(request, pk=pk)
if is_field_orphaned(instance):
data = {
'message': 'Orphaned fields do not support values calls.'
}
return self.render(
request, data, status=codes.unprocessable_entity)
params = self.get_params(request)
if params['aware']:
context = self.get_context(request)
else:
context = None
QueryProcessor = pipeline.query_processors[params['processor']]
processor = QueryProcessor(tree=instance.model, context=context)
queryset = processor.get_queryset(request=request)
if params['random']:
# In the case that the queryset contains a population smaller than
# the number of random items being requested, a ValueError will be
# triggered. Instead of passing the error on to the client, we
# simply return all the possible values.
try:
return self.get_random_values(
request, instance, params['random'], queryset)
except ValueError:
return instance.values(queryset=queryset)
page = params['page']
limit = params['limit']
# If a query term is supplied, perform the icontains search.
if params['query']:
usage.log('items', instance=instance, request=request, data={
'query': params['query'],
})
values = self.get_search_values(
request, instance, params['query'], queryset)
else:
values = self.get_all_values(request, instance, queryset)
# No page specified, return everything.
if page is None:
return values
paginator = self.get_paginator(values, limit=limit)
page = paginator.page(page)
# Get paginator-based response.
data = self.get_page_response(request, paginator, page)
data.update({
'items': page.object_list,
})
# Add links.
path = reverse('serrano:field-values', kwargs={'pk': pk})
links = self.get_page_links(request, path, page, extra=params)
templates = self.get_link_templates(request)
response = self.render(request, content=data)
return patch_response(request, response, links, templates)
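# Example request/response (sketch; the URL prefix, pk and values shown are
# assumptions, and the pagination keys come from get_page_response):
#
#   GET /api/fields/3/values/?query=foo&limit=5&page=1
#   => {"items": [{"label": "Foobar", "value": "foobar"}, ...],
#       ...pagination metadata...}
#   with previous/next page links patched onto the response.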
def post(self, request, pk):
instance = self.get_object(request, pk=pk)
params = self.get_params(request)
if not request.data:
data = {
'message': 'Error parsing data',
}
return self.render(request, data,
status=codes.unprocessable_entity)
if isinstance(request.data, dict):
array = [request.data]
else:
array = request.data
values = []
labels = []
array_map = {}
# Separate out the values and labels for the lookup. Track indexes to
# maintain the order of the array.
for i, datum in enumerate(array):
# Value takes precedence over label if supplied.
if 'value' in datum:
array_map[i] = 'value'
values.append(datum['value'])
elif 'label' in datum:
array_map[i] = 'label'
labels.append(datum['label'])
else:
data = {
'message': 'Error parsing value or label'
}
return self.render(request, data,
status=codes.unprocessable_entity)
value_field_name = instance.field_name
label_field_name = instance.label_field.name
# Note, this returns a context-aware or naive queryset depending
# on params. Get the value and label fields so they can be filled
# in below.
queryset = self.get_base_values(request, instance, params)\
.values_list(value_field_name, label_field_name)
lookup = Q()
# Validate based on the label.
if labels:
lookup |= Q(**{'{0}__in'.format(label_field_name): labels})
if values:
lookup |= Q(**{'{0}__in'.format(value_field_name): values})
results = queryset.filter(lookup)
value_labels = dict(results)
label_values = dict([(v, k) for k, v in value_labels.items()])
for i, datum in enumerate(array):
if array_map[i] == 'label':
valid = datum['label'] in label_values
if valid:
value = label_values[datum['label']]
else:
value = datum['label']
datum['valid'] = valid
datum['value'] = value
else:
valid = datum['value'] in value_labels
if valid:
label = value_labels[datum['value']]
else:
label = smart_unicode(datum['value'])
datum['valid'] = valid
datum['label'] = label
usage.log('validate', instance=instance, request=request, data={
'count': len(array),
})
# Return the augmented data.
return request.data
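# Example validation payload (sketch; the field values are illustrative):
#
#   POST /api/fields/3/values/  with body  [{"label": "Foobar"}, {"value": "baz"}]
#   => [{"label": "Foobar", "value": "foobar", "valid": true},
#       {"value": "baz", "label": "baz", "valid": false}]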
#!/usr/bin/env python
"""
This module attempts to "component-ify" GT's Fastscape stream power erosion.
Created DEJH, March 2014.
"""
from __future__ import print_function
import numpy
import warnings
from landlab import ModelParameterDictionary, Component
from landlab.core.model_parameter_dictionary import MissingKeyError, \
ParameterValueError
from landlab.utils.decorators import use_file_name_or_kwds
from landlab.field.scalar_data_fields import FieldError
from scipy.optimize import newton, fsolve
UNDEFINED_INDEX = -1
class FastscapeEroder(Component):
'''
This class uses the Braun-Willett Fastscape approach to calculate the
amount of erosion at each node in a grid, following a stream power
framework. This should allow it to be stable against larger timesteps
than an explicit stream power scheme.
Stream power erosion is implemented as::
E = K * (rainfall_intensity*A)**m * S**n - threshold_sp,
if K * A**m * S**n > threshold_sp, and::
E = 0,
if K * A**m * S**n <= threshold_sp.
This module assumes you have already run
:func:`landlab.components.flow_routing.route_flow_dn.FlowRouter.route_flow`
in the same timestep. It looks for 'flow__upstream_node_order',
'flow__link_to_receiver_node', 'drainage_area', 'flow__receiver_node', and
'topographic__elevation' at the nodes in the grid. 'drainage_area' should
be in area upstream, not volume (i.e., set runoff_rate=1.0 when calling
FlowRouter.route_flow).
The primary method of this class is :func:`run_one_step`.
Construction::
FastscapeEroder(grid, K_sp=None, m_sp=0.5, n_sp=1., threshold_sp=0.,
rainfall_intensity=1.)
Parameters
----------
grid : ModelGrid
A grid.
K_sp : float, array, or field name
K in the stream power equation (units vary with other parameters).
m_sp : float, optional
m in the stream power equation (power on drainage area).
n_sp : float, optional, ~ 0.5<n_sp<4.
n in the stream power equation (power on slope).
Performance will be VERY degraded if n < 1.
threshold_sp : float, array, or field name
The threshold stream power.
rainfall_intensity : float; optional
Modifying factor on drainage area to convert it to a true water
volume flux in (m/time). i.e., E = K * (r_i*A)**m * S**n. For a time
varying rainfall intensity, pass rainfall_intensity_if_used to
`run_one_step`. For a spatially variable rainfall, use the
StreamPowerEroder component.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab import CLOSED_BOUNDARY, FIXED_VALUE_BOUNDARY
>>> from landlab.components import FlowRouter
>>> mg = RasterModelGrid((5, 5), 10.)
>>> z = np.array([7., 7., 7., 7., 7.,
... 7., 5., 3.2, 6., 7.,
... 7., 2., 3., 5., 7.,
... 7., 1., 1.9, 4., 7.,
... 7., 0., 7., 7., 7.])
>>> z = mg.add_field('node', 'topographic__elevation', z)
>>> fr = FlowRouter(mg)
>>> sp = FastscapeEroder(mg, K_sp=1.)
>>> fr.run_one_step()
>>> sp.run_one_step(dt=1.)
>>> z # doctest: +NORMALIZE_WHITESPACE
array([ 7. , 7. , 7. , 7. , 7. ,
7. , 2.92996598, 2.02996598, 4.01498299, 7. ,
7. , 0.85993197, 1.87743897, 3.28268321, 7. ,
7. , 0.28989795, 0.85403051, 2.42701526, 7. ,
7. , 0. , 7. , 7. , 7. ])
>>> mg2 = RasterModelGrid((3, 7), 1.)
>>> z = np.array(mg2.node_x**2.)
>>> z = mg2.add_field('node', 'topographic__elevation', z)
>>> mg2.status_at_node[mg2.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_right_edge] = CLOSED_BOUNDARY
>>> fr2 = FlowRouter(mg2)
>>> sp2 = FastscapeEroder(mg2, K_sp=0.1, m_sp=0., n_sp=2.,
... threshold_sp=2.)
>>> fr2.run_one_step()
>>> sp2.run_one_step(dt=10.)
>>> z.reshape((3, 7))[1, :] # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 4. , 8.52493781,
13.29039716, 18.44367965, 36. ])
>>> mg3 = RasterModelGrid((3, 7), 1.)
>>> z = np.array(mg3.node_x**2.)
>>> z = mg3.add_field('node', 'topographic__elevation', z)
>>> mg3.status_at_node[mg3.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_right_edge] = CLOSED_BOUNDARY
>>> fr3 = FlowRouter(mg3)
>>> K_field = mg3.ones('node') # K can be a field
>>> sp3 = FastscapeEroder(mg3, K_sp=K_field, m_sp=1., n_sp=0.6,
... threshold_sp=mg3.node_x,
... rainfall_intensity=2.)
>>> fr3.run_one_step()
>>> sp3.run_one_step(1.)
>>> z.reshape((3, 7))[1, :] # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 0.0647484 , 0.58634455, 2.67253503,
8.49212152, 20.92606987, 36. ])
>>> previous_z = z.copy()
>>> sp3.run_one_step(1., rainfall_intensity_if_used=0.)
>>> np.allclose(z, previous_z)
True
'''
_name = 'FastscapeEroder'
_input_var_names = (
'topographic__elevation',
'drainage_area',
'flow__link_to_receiver_node',
'flow__upstream_node_order',
'flow__receiver_node',
)
_output_var_names = (
'topographic__elevation',
)
_var_units = {
'topographic__elevation': 'm',
'drainage_area': 'm**2',
'flow__link_to_receiver_node': '-',
'flow__upstream_node_order': '-',
'flow__receiver_node': '-',
}
_var_mapping = {
'topographic__elevation': 'node',
'drainage_area': 'node',
'flow__link_to_receiver_node': 'node',
'flow__upstream_node_order': 'node',
'flow__receiver_node': 'node',
}
_var_doc = {
'topographic__elevation': 'Land surface topographic elevation',
'drainage_area':
"Upstream accumulated surface area contributing to the node's "
"discharge",
'flow__link_to_receiver_node':
'ID of link downstream of each node, which carries the discharge',
'flow__upstream_node_order':
'Node array containing downstream-to-upstream ordered list of '
'node IDs',
'flow__receiver_node':
'Node array of receivers (node that receives flow from current '
'node)',
}
@use_file_name_or_kwds
def __init__(self, grid, K_sp=None, m_sp=0.5, n_sp=1., threshold_sp=0.,
rainfall_intensity=1., **kwds):
"""
Initialize the Fastscape stream power component. Note: a timestep,
dt, can no longer be supplied to this component through the input file.
It must instead be passed directly to the run method.
Parameters
----------
grid : ModelGrid
A grid.
K_sp : float, array, or field name
K in the stream power equation (units vary with other parameters).
m_sp : float, optional
m in the stream power equation (power on drainage area).
n_sp : float, optional
n in the stream power equation (power on slope).
rainfall_intensity : float, array, or field name; optional
Modifying factor on drainage area to convert it to a true water
volume flux in (m/time). i.e., E = K * (r_i*A)**m * S**n
"""
self._grid = grid
self.K = K_sp # overwritten below in special cases
self.m = float(m_sp)
self.n = float(n_sp)
if type(threshold_sp) in (float, int):
self.thresholds = float(threshold_sp)
else:
if type(threshold_sp) is str:
self.thresholds = self.grid.at_node[threshold_sp]
else:
self.thresholds = threshold_sp
assert self.thresholds.size == self.grid.number_of_nodes
# make storage variables
self.A_to_the_m = grid.zeros(at='node')
self.alpha = grid.empty(at='node')
self.alpha_by_flow_link_lengthtothenless1 = numpy.empty_like(
self.alpha)
try:
self.grid._diagonal_links_at_node # calc number of diagonal links
except AttributeError:
pass # was not a raster
if self.K is None:
raise ValueError('K_sp must be set as a float, node array, or ' +
'field name. It was None.')
# now handle the inputs that could be float, array or field name:
# some support here for old-style inputs
if type(K_sp) is str:
if K_sp == 'array':
self.K = None
else:
self.K = self._grid.at_node[K_sp]
elif type(K_sp) in (float, int): # a float
self.K = float(K_sp)
elif len(K_sp) == self.grid.number_of_nodes:
self.K = numpy.array(K_sp)
else:
raise TypeError('Supplied type of K_sp ' +
'was not recognised, or array was ' +
'not nnodes long!')
if type(rainfall_intensity) is str:
raise ValueError('This component can no longer handle ' +
'spatially variable rainfall. Use ' +
'StreamPowerEroder.')
if rainfall_intensity == 'array':
self._r_i = None
else:
self._r_i = self._grid.at_node[rainfall_intensity]
elif type(rainfall_intensity) in (float, int): # a float
self._r_i = float(rainfall_intensity)
elif len(rainfall_intensity) == self.grid.number_of_nodes:
raise ValueError('This component can no longer handle ' +
'spatially variable rainfall. Use ' +
'StreamPowerEroder.')
self._r_i = numpy.array(rainfall_intensity)
else:
raise TypeError('Supplied type of rainfall_' +
'intensity was not recognised!')
# We now forbid changing of the field name
if 'value_field' in kwds.keys():
raise ValueError('This component can no longer support variable' +
'field names. Use "topographic__elevation".')
def erode(self, grid_in, dt=None, K_if_used=None, flooded_nodes=None,
rainfall_intensity_if_used=None):
"""
This method implements the stream power erosion, following the Braun-
Willett (2013) implicit Fastscape algorithm. This should allow it to
be stable against larger timesteps than an explicit stream power
scheme.
This driving method for this component is now superseded by the new,
standardized wrapper :func:`run_one_step`, but is retained for
backwards compatibility.
Set 'K_if_used' as a field name or nnodes-long array if you set K_sp as
'array' during initialization.
It returns the grid, in which it will have modified the value of
*value_field*, as specified in component initialization.
Parameters
----------
grid_in : a grid
This is a dummy argument maintained for component back-
compatibility. It is superseded by the copy of the grid passed
during initialization.
dt : float
Time-step size. If you are calling the deprecated function
:func:`gear_timestep`, that method will supersede any value
supplied here.
K_if_used : array (optional)
Set this to an array if you set K_sp to 'array' in your input file.
flooded_nodes : ndarray of int (optional)
IDs of nodes that are flooded and should have no erosion. If not
provided but flow has still been routed across depressions, erosion
may still occur beneath the apparent water level (though will
always still be positive).
rainfall_intensity_if_used : float or None (optional)
Supply to drive this component with a time-varying spatially
constant rainfall.
Returns
-------
grid
A reference to the grid.
"""
self.alpha = numpy.zeros(self._grid.number_of_nodes)
self.alpha_by_flow_link_lengthtothenless1 = numpy.zeros(self._grid.number_of_nodes)
upstream_order_IDs = self._grid['node']['flow__upstream_node_order']
z = self._grid['node']['topographic__elevation']
defined_flow_receivers = numpy.not_equal(self._grid['node'][
'flow__link_to_receiver_node'], UNDEFINED_INDEX)
flow_link_lengths = self._grid._length_of_link_with_diagonals[
self._grid['node']['flow__link_to_receiver_node'][
defined_flow_receivers]]
# make arrays from input the right size
if type(self.K) is numpy.ndarray:
K_here = self.K[defined_flow_receivers]
else:
K_here = self.K
if rainfall_intensity_if_used is not None:
assert type(rainfall_intensity_if_used) in (float, int)
r_i_here = float(rainfall_intensity_if_used)
else:
r_i_here = self._r_i
if dt is None:
dt = self.dt
assert dt is not None, ('Fastscape component could not find a dt to ' +
'use. Pass dt to the run_one_step() method.')
if self.K is None: # "old style" setting of array
assert K_if_used is not None
self.K = K_if_used
numpy.power(self._grid['node']['drainage_area'], self.m,
out=self.A_to_the_m)
self.alpha[defined_flow_receivers] = r_i_here**self.m * K_here * dt * \
self.A_to_the_m[defined_flow_receivers] / flow_link_lengths
flow_receivers = self._grid['node']['flow__receiver_node']
n_nodes = upstream_order_IDs.size
alpha = self.alpha
# Handle flooded nodes, if any (no erosion there)
if flooded_nodes is not None:
alpha[flooded_nodes] = 0.
else:
reversed_flow = z < z[flow_receivers]
# this check necessary if flow has been routed across depressions
alpha[reversed_flow] = 0.
self.alpha_by_flow_link_lengthtothenless1[
defined_flow_receivers] = (alpha[defined_flow_receivers] /
flow_link_lengths**(self.n - 1.))
alpha_divided = self.alpha_by_flow_link_lengthtothenless1
n = float(self.n)
threshdt = self.thresholds * dt
if type(self.thresholds) is float:
from .cfuncs import erode_with_link_alpha_fixthresh
erode_with_link_alpha_fixthresh(upstream_order_IDs, flow_receivers,
threshdt, alpha_divided, n, z)
else:
from .cfuncs import erode_with_link_alpha_varthresh
erode_with_link_alpha_varthresh(upstream_order_IDs, flow_receivers,
threshdt, alpha_divided, n, z)
# # This replicates the cython for testing:
# for i in range(upstream_order_IDs.size):
# src_id = upstream_order_IDs[i]
# dst_id = flow_receivers[src_id]
# thresh = threshdt[i]
# if src_id != dst_id:
# next_z = z[src_id]
# prev_z = 0.
# while True:
# #for j in range(2):
# z_diff = next_z - z[dst_id]
# f = alpha_divided[src_id] * pow(z_diff, n - 1.)
# # if z_diff -> 0, pow -> nan (in reality, inf)
# # print (f, prev_z, next_z, z_diff, z[dst_id])
# next_z = next_z - ((next_z - z[src_id] + (
# f*z_diff - thresh).clip(0.)) / (1. + n * f))
# if next_z < z[dst_id]:
# next_z = z[dst_id] + 1.e-15
# # ^maintain connectivity
# if next_z != 0.:
# if (numpy.fabs((next_z - prev_z)/next_z) <
# 1.48e-08) or (n == 1.):
# break
# else:
# break
# prev_z = next_z
# if next_z < z[src_id]:
# z[src_id] = next_z
return self._grid
def run_one_step(self, dt, flooded_nodes=None,
rainfall_intensity_if_used=None, **kwds):
"""
This method implements the stream power erosion across one time
interval, dt, following the Braun-Willett (2013) implicit Fastscape
algorithm.
This follows Landlab standardized component design, and supersedes the
old driving method :func:`erode`.
Parameters
----------
dt : float
Time-step size
flooded_nodes : ndarray of int (optional)
IDs of nodes that are flooded and should have no erosion. If not
provided but flow has still been routed across depressions, erosion
may still occur beneath the apparent water level (though will
always still be positive).
rainfall_intensity_if_used : float or None (optional)
Supply to drive this component with a time-varying spatially
constant rainfall.
"""
self.erode(grid_in=self._grid, dt=dt, flooded_nodes=flooded_nodes,
rainfall_intensity_if_used=rainfall_intensity_if_used)
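# Typical driving loop (sketch; grid, FlowRouter `fr` and FastscapeEroder `sp`
# set up as in the class docstring above):
#
#   for _ in range(100):
#       fr.run_one_step()
#       sp.run_one_step(dt=1000.)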
import logging
import os
import shutil
import tempfile
from crawler_exceptions import CrawlError, CrawlUnsupportedPackageManager
from utils import osinfo
from utils.features import PackageFeature
from utils.misc import subprocess_run
logger = logging.getLogger('crawlutils')
def get_dpkg_packages(
root_dir='/',
dbpath='var/lib/dpkg',
installed_since=0):
if os.path.isabs(dbpath):
logger.warning(
'dbpath: ' +
dbpath +
' is defined absolute. Ignoring prefix: ' +
root_dir +
'.')
# Update for a different route.
dbpath = os.path.join(root_dir, dbpath)
output = subprocess_run(['dpkg-query', '-W',
'--admindir={0}'.format(dbpath),
'-f=${Package}|${Version}'
'|${Architecture}|${Installed-Size}\n'],
shell=False)
dpkglist = output.strip('\n')
if dpkglist:
for dpkginfo in dpkglist.split('\n'):
(name, version, architecture, size) = dpkginfo.split(r'|')
# dpkg does not provide any installtime field
# feature_key = '{0}/{1}'.format(name, version) -->
# changed to below per Suriya's request
feature_key = '{0}'.format(name, version)
yield (feature_key, PackageFeature(None, name,
size, version,
architecture))
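# Example usage (sketch; requires dpkg-query on the PATH and a readable dpkg
# database under root_dir/dbpath):
#
#   for key, feature in get_dpkg_packages(root_dir='/', dbpath='var/lib/dpkg'):
#       print(key, feature)   # e.g. ('bash', PackageFeature(...))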
def get_rpm_packages(
root_dir='/',
dbpath='var/lib/rpm',
installed_since=0,
reload_needed=False):
if os.path.isabs(dbpath):
logger.warning(
'dbpath: ' +
dbpath +
' is defined absolute. Ignoring prefix: ' +
root_dir +
'.')
# update for a different route
dbpath = os.path.join(root_dir, dbpath)
try:
if reload_needed:
reloaded_db_dir = tempfile.mkdtemp()
_rpm_reload_db(root_dir, dbpath, reloaded_db_dir)
dbpath = reloaded_db_dir
output = subprocess_run(['rpm',
'--dbpath',
dbpath,
'-qa',
'--queryformat',
'%{installtime}|%{name}|%{version}'
'-%{release}|%{arch}|%{size}\n'],
shell=False,
ignore_failure=True)
# We ignore failures because sometimes rpm returns rc=1 but still
# outputs all the data.
rpmlist = output.strip('\n')
finally:
if reload_needed:
logger.debug('Deleting directory: %s' % (reloaded_db_dir))
shutil.rmtree(reloaded_db_dir)
if rpmlist:
for rpminfo in rpmlist.split('\n'):
(installtime, name, version, architecture, size) = \
rpminfo.split(r'|')
"""
if int(installtime) <= installed_since: --> this
barfs for sth like: 1376416422. Consider try: xxx
except ValueError: pass
"""
if installtime <= installed_since:
continue
"""
feature_key = '{0}/{1}'.format(name, version) -->
changed to below per Suriya's request
"""
feature_key = '{0}'.format(name, version)
yield (feature_key,
PackageFeature(installtime,
name, size, version, architecture))
def _rpm_reload_db(
root_dir='/',
dbpath='var/lib/rpm',
reloaded_db_dir='/tmp/'):
"""
Dumps and reloads the rpm database.
Returns the path to the new rpm database, or raises RuntimeError if the
dump and load commands failed.
"""
try:
dump_dir = tempfile.mkdtemp()
subprocess_run(['/usr/bin/db_dump',
os.path.join(dbpath, 'Packages'),
'-f',
os.path.join(dump_dir, 'Packages')],
shell=False)
subprocess_run(['/usr/bin/db_load',
'-f',
os.path.join(dump_dir, 'Packages'),
os.path.join(reloaded_db_dir, 'Packages')],
shell=False)
finally:
logger.debug('Deleting directory: %s' % (dump_dir))
shutil.rmtree(dump_dir)
return reloaded_db_dir
# from UK crawler codebase
def apk_parser(filename):
try:
db_contents = open(filename).read()
packages = db_contents.split('\n\n')
logger.debug('Found {} APK packages'.format(len(packages)))
for package in packages:
if package:
attributes = package.split('\n')
name = ""
version = ""
architecture = ""
size = ""
for attribute in attributes:
if (attribute.startswith('P:')):
name = attribute[2:]
elif (attribute.startswith('V:')):
version = attribute[2:]
elif (attribute.startswith('A:')):
architecture = attribute[2:]
elif (attribute.startswith('S:')):
size = attribute[2:]
yield (name, PackageFeature(None, name,
size, version,
architecture))
except IOError as e:
logger.error('Failed to read APK database to obtain packages. '
'Check if %s is present. [Exception: %s: %s]'
' ' % (filename, type(e).__name__, e.strerror))
raise
def get_apk_packages(
root_dir='/',
dbpath='lib/apk/db'):
if os.path.isabs(dbpath):
logger.warning(
'dbpath: ' +
dbpath +
' is defined absolute. Ignoring prefix: ' +
root_dir +
'.')
# Update for a different route.
dbpath = os.path.join(root_dir, dbpath)
for feature_key, package_feature in apk_parser(
os.path.join(dbpath, 'installed')):
yield (feature_key, package_feature)
def crawl_packages(
dbpath=None,
root_dir='/',
installed_since=0,
reload_needed=True):
# package attributes: ["installed", "name", "size", "version"]
logger.debug('Crawling Packages')
try:
pkg_manager = _get_package_manager(root_dir)
if pkg_manager == 'dpkg':
dbpath = dbpath or 'var/lib/dpkg'
for (key, feature) in get_dpkg_packages(
root_dir, dbpath, installed_since):
yield (key, feature, 'package')
elif pkg_manager == 'rpm':
dbpath = dbpath or 'var/lib/rpm'
for (key, feature) in get_rpm_packages(
root_dir, dbpath, installed_since, reload_needed):
yield (key, feature, 'package')
elif pkg_manager == 'apk':
dbpath = dbpath or 'lib/apk/db'
for (key, feature) in get_apk_packages(
root_dir, dbpath):
yield (key, feature, 'package')
else:
logger.warning('Unsupported package manager for Linux distro')
except Exception as e:
logger.error('Error crawling packages',
exc_info=True)
raise CrawlError(e)
def _get_package_manager(root_dir):
result = osinfo.get_osinfo(mount_point=root_dir)
if result:
os_distro = result['os']
else:
raise CrawlUnsupportedPackageManager()
pkg_manager = None
if os_distro in ['ubuntu', 'debian']:
pkg_manager = 'dpkg'
elif os_distro in ['redhat', 'red hat', 'rhel', 'fedora', 'centos']:
pkg_manager = 'rpm'
elif os_distro in ['alpine']:
pkg_manager = 'apk'
elif os.path.exists(os.path.join(root_dir, 'var/lib/dpkg')):
pkg_manager = 'dpkg'
elif os.path.exists(os.path.join(root_dir, 'var/lib/rpm')):
pkg_manager = 'rpm'
return pkg_manager
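# Example usage (sketch; crawling '/' needs read access to the host's package
# database):
#
#   for key, feature, feature_type in crawl_packages(root_dir='/'):
#       print(feature_type, key)   # e.g. 'package bash'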
import math
import os
import threading
from collections import defaultdict
from typing import Dict
import copy
from twisted.internet.address import IPv4Address
import bptc
from bptc.data.consensus import divide_rounds, decide_fame, find_order
from bptc.data.event import Event, Parents
from bptc.data.member import Member
from bptc.utils.toposort import toposort
from bptc.data.transaction import MoneyTransaction, TransactionStatus, PublishNameTransaction
class Hashgraph:
"""
The Hashgraph - storing the events of all nodes
"""
def __init__(self, me, debug_mode=False):
self.lock = threading.RLock()
# Member: A reference to the current user. For convenience (e.g. signing)
self.me = me
self.debug_mode = debug_mode
# {member-id => Member}: All members we know
if me is not None:
self.known_members = {me.id: me}
# {event-hash => event}: Dictionary mapping hashes to events
self.lookup_table = {}
# {event-hash}: Events for which the final order has not yet been determined
self.unordered_events = set()
# [event-hash]: Final order of events
self.ordered_events = []
self.next_ordered_event_idx_to_process = 0
self.idx = {}
# {round-num}: rounds where fame is fully decided
self.rounds_with_decided_fame = set()
# {round-num => {member-pk => event-hash}}:
self.witnesses = defaultdict(dict)
# {event-hash => set(event-hash)}: Cache for event's self-children (used for fast fork check)
self.self_children_cache = defaultdict(set)
# set(member-id): A set of member who forked. Members who forked have no visible events.
self.fork_blacklist = set()
@property
def total_stake(self) -> int:
"""
:return: The total stake in the hashgraph
"""
        return sum(member.stake for member in self.known_members.values())
@property
def supermajority_stake(self) -> int:
"""
:return: The stake needed for a supermajority (2/3 of total)
"""
return int(math.floor(2 * self.total_stake / 3))
def get_unknown_events_of(self, member: Member) -> Dict[str, Event]:
"""
Returns the presumably unknown events of a given member, in the same format as lookup_table
:param member: The member for which to return unknown events
:return: Dictionary mapping hashes to events
"""
result = dict(self.lookup_table)
head = member.head
if head is None:
return result
to_visit = {head}
visited = set()
while len(to_visit) > 0:
event_id = to_visit.pop()
if event_id not in visited:
event = result[event_id]
del result[event_id]
if event.parents.self_parent is not None:
to_visit.add(event.parents.self_parent)
if event.parents.other_parent is not None:
to_visit.add(event.parents.other_parent)
visited.add(event_id)
return result
def add_own_event(self, event: Event, calculate_consensus: bool = False):
"""
Adds an own event to the hashgraph
:param event: The event to be added
:param calculate_consensus: Whether the consensus should be calculated immediately
:return: None
"""
# Sign event body
event.sign(self.me.signing_key)
# Add event
self.add_event(event)
        # Optionally calculate the consensus immediately
if calculate_consensus:
divide_rounds(self, [event])
decide_fame(self)
find_order(self)
self.process_ordered_events()
def add_event(self, event: Event):
# Set the event's correct height
if event.parents.self_parent:
event.height = self.lookup_table[event.parents.self_parent].height + 1
# Add event to graph
self.lookup_table[event.id] = event
# Update caches
self.unordered_events.add(event.id)
if self.known_members[event.verify_key].head is None or \
event.height > self.lookup_table[self.known_members[event.verify_key].head].height:
self.known_members[event.verify_key].head = event.id
if event.parents.self_parent is not None:
self.self_children_cache[event.parents.self_parent].add(event.id)
if len(self.self_children_cache[event.parents.self_parent]) > 1:
# We just added a fork
bptc.logger.warn("A fork was created! Blacklisting member and clearing visibility caches.")
# Blacklist the member who forked
self.fork_blacklist.add(event.verify_key)
# Visibility for events could have changed - throw away the caches
for e in self.lookup_table.values():
e.can_see_cache.clear()
def process_events(self, from_member: Member, events: Dict[str, Event]) -> None:
"""
Processes a list of events
:param from_member: The member from whom the events were received
:param events: The events to be processed
:return: None
"""
events = copy.deepcopy(events)
bptc.logger.debug("Processing {} events from {}...".format(len(events), from_member.verify_key[:6]))
# Only deal with valid events
events = filter_valid_events(events)
events_toposorted = toposort(events)
# Learn about other members
self.learn_members_from_events(events)
# Add all new events in topological order and check parent pointer
new_events = {}
for event in events_toposorted:
if event.id not in self.lookup_table:
if event.parents.self_parent is not None and event.parents.self_parent not in self.lookup_table:
bptc.logger.error('Self parent {} of {} not known. Ignore all data.'.
format(event.parents.self_parent[:6], event.id[:6]))
return
if event.parents.other_parent is not None and event.parents.other_parent not in self.lookup_table:
bptc.logger.error('Other parent {} of {} not known. Ignore all data'.
format(event.parents.other_parent[:6], event.id[:6]))
return
new_events[event.id] = event
self.add_event(event)
# Create a new event for the gossip
event = Event(self.me.verify_key, None, Parents(self.me.head, from_member.head))
self.add_own_event(event)
new_events[event.id] = event
# Figure out fame, order, etc.
divide_rounds(self, toposort(new_events))
decide_fame(self)
find_order(self)
self.process_ordered_events()
# Debug mode writes the DB to a file every 100 events.
if self.debug_mode:
number_events = (len(self.lookup_table) // 100) * 100
# Don't store when there are not enough events or it would overwrite
# the last temporary db
if number_events > 0 and number_events > self.debug_mode:
bptc.logger.debug('Store intermediate results containing about {} events'.format(number_events))
from bptc.data.db import DB
DB.save(self, temp=True)
self.debug_mode = (len(self.lookup_table) // 100) * 100
def learn_members_from_events(self, events: Dict[str, Event]) -> None:
"""
Goes through a list of events and learns their creators if they are not already known
:param events: The list of events
:return: None
"""
for event in events.values():
if event.verify_key not in self.known_members:
self.known_members[event.verify_key] = Member(event.verify_key, None)
def process_ordered_events(self):
for event_id in self.ordered_events[self.next_ordered_event_idx_to_process:len(self.ordered_events)]:
event = self.lookup_table[event_id]
if event.data is None:
continue
for transaction in event.data:
sender = self.known_members[event.verify_key]
if isinstance(transaction, MoneyTransaction):
receiver = self.known_members[transaction.receiver]
# Check if the sender has the funds
if sender.account_balance < transaction.amount or transaction.amount < 0:
transaction.status = TransactionStatus.DENIED
else:
sender.account_balance -= transaction.amount
receiver.account_balance += transaction.amount
transaction.status = TransactionStatus.CONFIRMED
elif isinstance(transaction, PublishNameTransaction):
sender.name = transaction.name
self.next_ordered_event_idx_to_process = len(self.ordered_events)
def parse_transaction(self, event, transaction, plain=False):
receiver = self.known_members[transaction.receiver].formatted_name if \
transaction.receiver in self.known_members else transaction.receiver
sender = self.known_members[event.verify_key].formatted_name if \
event.verify_key in self.known_members else event.verify_key
status = TransactionStatus.text_for_value(transaction.status)
is_received = transaction.receiver == self.me.to_verifykey_string()
amount = transaction.amount
comment = transaction.comment
time = event.time
rec = {
'receiver': receiver,
'sender': sender,
'amount': amount,
'comment': comment,
'time': time,
'status': status,
'is_received': is_received,
}
format_string = '{} [b]{} BPTC[/b] {} [b]{}[/b] ({}){}'
if plain:
format_string = '{} {} BPTC {} {} ({}){}'
rec['formatted'] = format_string.format(
'Received' if is_received else 'Sent',
amount,
'from' if rec['is_received'] else 'to',
sender if rec['is_received'] else receiver,
status,
'\n"{}"'.format(comment) if comment else '',
).replace('\n', ' - ' if plain else '\n')
return rec
def get_relevant_transactions(self, plain=False, show_all=False):
# Load transactions belonging to this member
transactions = []
events = list(self.lookup_table.values())
for e in events:
for t in e.data or []:
if isinstance(t, MoneyTransaction):
if show_all or self.me.to_verifykey_string() in [e.verify_key, t.receiver]:
transactions.append(self.parse_transaction(e, t, plain))
return sorted(transactions, key=lambda x: x['time'], reverse=True)
def filter_valid_events(events: Dict[str, Event]) -> Dict[str, Event]:
"""
Goes through a dict of events and returns a dict containing only the valid ones
:param events: The dict to be filtered
:return: A dict containing only valid events
"""
result = dict()
for event_id, event in events.items():
if event.has_valid_signature:
result[event_id] = event
else:
bptc.logger.warn("Event had invalid signature: {}".format(event))
return result
def init_hashgraph(app):
"""Loads the hashgraph from file or creates a new one, if the file doesn't exist."""
from bptc.data.db import DB
from bptc.data.network import Network
# Try to load the Hashgraph from the database
hashgraph = DB.load_hashgraph(os.path.join(app.cl_args.output, 'data.db'))
hashgraph.debug_mode = app.cl_args.debug
# Create a new hashgraph if it could not be loaded
if hashgraph is None or hashgraph.me is None:
me = Member.create()
me.address = IPv4Address("TCP", bptc.ip, bptc.port)
hashgraph = Hashgraph(me, app.cl_args.debug)
app.network = Network(hashgraph, create_initial_event=True)
else:
app.network = Network(hashgraph, create_initial_event=False)
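# Hedged, standalone illustration of the supermajority arithmetic used by
# Hashgraph.supermajority_stake above (not part of bptc): the threshold is
# floor(2 * total_stake / 3), e.g. 6 for a total stake of 10.
def _example_supermajority(total_stake):
    return int(math.floor(2 * total_stake / 3))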
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import campaign_simulation
from google.ads.googleads.v8.services.types import campaign_simulation_service
from .transports.base import (
CampaignSimulationServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CampaignSimulationServiceGrpcTransport
class CampaignSimulationServiceClientMeta(type):
"""Metaclass for the CampaignSimulationService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CampaignSimulationServiceTransport]]
_transport_registry["grpc"] = CampaignSimulationServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[CampaignSimulationServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CampaignSimulationServiceClient(
metaclass=CampaignSimulationServiceClientMeta
):
"""Service to fetch campaign simulations."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CampaignSimulationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CampaignSimulationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CampaignSimulationServiceTransport:
"""Return the transport used by the client instance.
Returns:
CampaignSimulationServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def campaign_simulation_path(
customer_id: str,
campaign_id: str,
type: str,
modification_method: str,
start_date: str,
end_date: str,
) -> str:
"""Return a fully-qualified campaign_simulation string."""
return "customers/{customer_id}/campaignSimulations/{campaign_id}~{type}~{modification_method}~{start_date}~{end_date}".format(
customer_id=customer_id,
campaign_id=campaign_id,
type=type,
modification_method=modification_method,
start_date=start_date,
end_date=end_date,
)
@staticmethod
def parse_campaign_simulation_path(path: str) -> Dict[str, str]:
"""Parse a campaign_simulation path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/campaignSimulations/(?P<campaign_id>.+?)~(?P<type>.+?)~(?P<modification_method>.+?)~(?P<start_date>.+?)~(?P<end_date>.+?)$",
path,
)
return m.groupdict() if m else {}
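    # Illustrative round trip for the two helpers above (all values made up):
    #   campaign_simulation_path("123", "456", "TARGET_CPA", "UNIFORM",
    #                            "2021-01-01", "2021-01-31")
    #   -> "customers/123/campaignSimulations/456~TARGET_CPA~UNIFORM~2021-01-01~2021-01-31"
    #   parse_campaign_simulation_path(<that string>)
    #   -> {"customer_id": "123", "campaign_id": "456", "type": "TARGET_CPA",
    #       "modification_method": "UNIFORM", "start_date": "2021-01-01",
    #       "end_date": "2021-01-31"}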
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CampaignSimulationServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the campaign simulation service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CampaignSimulationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CampaignSimulationServiceTransport):
# transport is a CampaignSimulationServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CampaignSimulationServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_campaign_simulation(
self,
request: campaign_simulation_service.GetCampaignSimulationRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> campaign_simulation.CampaignSimulation:
r"""Returns the requested campaign simulation in full
detail.
Args:
request (:class:`google.ads.googleads.v8.services.types.GetCampaignSimulationRequest`):
The request object. Request message for
[CampaignSimulationService.GetCampaignSimulation][google.ads.googleads.v8.services.CampaignSimulationService.GetCampaignSimulation].
resource_name (:class:`str`):
Required. The resource name of the
campaign simulation to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
            google.ads.googleads.v8.resources.types.CampaignSimulation:
                A campaign simulation. The supported combinations of
                advertising channel type, simulation type and simulation
                modification method are:

                SEARCH - CPC_BID - UNIFORM
                SEARCH - CPC_BID - SCALING
                SEARCH - TARGET_CPA - UNIFORM
                SEARCH - TARGET_CPA - SCALING
                SEARCH - TARGET_ROAS - UNIFORM
                SEARCH - TARGET_IMPRESSION_SHARE - UNIFORM
                SEARCH - BUDGET - UNIFORM
                SHOPPING - BUDGET - UNIFORM
                SHOPPING - TARGET_ROAS - UNIFORM
                MULTIPLE - TARGET_CPA - UNIFORM
                OWNED_AND_OPERATED - TARGET_CPA - DEFAULT
                DISPLAY - TARGET_CPA - UNIFORM
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a campaign_simulation_service.GetCampaignSimulationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, campaign_simulation_service.GetCampaignSimulationRequest
):
request = campaign_simulation_service.GetCampaignSimulationRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_campaign_simulation
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("CampaignSimulationServiceClient",)
|
|
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Andy Casey <acasey@mso.anu.edu.au> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 John Eskew <jeskew@edx.org> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.PaginatedList
import github.GitCommit
import github.NamedUser
import github.CommitStatus
import github.CommitCombinedStatus
import github.File
import github.CommitStats
import github.CommitComment
class Commit(github.GithubObject.CompletableGithubObject):
"""
This class represents Commits. The reference can be found here http://developer.github.com/v3/git/commits/
"""
def __repr__(self):
return self.get__repr__({"sha": self._sha.value})
@property
def author(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._author)
return self._author.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commit(self):
"""
:type: :class:`github.GitCommit.GitCommit`
"""
self._completeIfNotSet(self._commit)
return self._commit.value
@property
def committer(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._committer)
return self._committer.value
@property
def files(self):
"""
:type: list of :class:`github.File.File`
"""
self._completeIfNotSet(self._files)
return self._files.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def parents(self):
"""
:type: list of :class:`github.Commit.Commit`
"""
self._completeIfNotSet(self._parents)
return self._parents.value
@property
def sha(self):
"""
:type: string
"""
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def stats(self):
"""
:type: :class:`github.CommitStats.CommitStats`
"""
self._completeIfNotSet(self._stats)
return self._stats.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def create_comment(self, body, line=github.GithubObject.NotSet, path=github.GithubObject.NotSet, position=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/commits/:sha/comments <http://developer.github.com/v3/repos/comments>`_
:param body: string
:param line: integer
:param path: string
:param position: integer
:rtype: :class:`github.CommitComment.CommitComment`
"""
assert isinstance(body, (str, unicode)), body
assert line is github.GithubObject.NotSet or isinstance(line, (int, long)), line
assert path is github.GithubObject.NotSet or isinstance(path, (str, unicode)), path
assert position is github.GithubObject.NotSet or isinstance(position, (int, long)), position
post_parameters = {
"body": body,
}
if line is not github.GithubObject.NotSet:
post_parameters["line"] = line
if path is not github.GithubObject.NotSet:
post_parameters["path"] = path
if position is not github.GithubObject.NotSet:
post_parameters["position"] = position
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/comments",
input=post_parameters
)
return github.CommitComment.CommitComment(self._requester, headers, data, completed=True)
def create_status(self, state, target_url=github.GithubObject.NotSet, description=github.GithubObject.NotSet, context=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/statuses/:sha <http://developer.github.com/v3/repos/statuses>`_
:param state: string
:param target_url: string
:param description: string
:param context: string
:rtype: :class:`github.CommitStatus.CommitStatus`
"""
assert isinstance(state, (str, unicode)), state
assert target_url is github.GithubObject.NotSet or isinstance(target_url, (str, unicode)), target_url
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert context is github.GithubObject.NotSet or isinstance(context, (str, unicode)), context
post_parameters = {
"state": state,
}
if target_url is not github.GithubObject.NotSet:
post_parameters["target_url"] = target_url
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if context is not github.GithubObject.NotSet:
post_parameters["context"] = context
headers, data = self._requester.requestJsonAndCheck(
"POST",
self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
input=post_parameters
)
return github.CommitStatus.CommitStatus(self._requester, headers, data, completed=True)
def get_comments(self):
"""
:calls: `GET /repos/:owner/:repo/commits/:sha/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
"""
return github.PaginatedList.PaginatedList(
github.CommitComment.CommitComment,
self._requester,
self.url + "/comments",
None
)
def get_statuses(self):
"""
:calls: `GET /repos/:owner/:repo/statuses/:ref <http://developer.github.com/v3/repos/statuses>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitStatus.CommitStatus`
"""
return github.PaginatedList.PaginatedList(
github.CommitStatus.CommitStatus,
self._requester,
self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
None
)
def get_combined_status(self):
"""
:calls: `GET /repos/:owner/:repo/commits/:ref/status/ <http://developer.github.com/v3/repos/statuses>`_
:rtype: :class:`github.CommitCombinedStatus.CommitCombinedStatus`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/status"
)
return github.CommitCombinedStatus.CommitCombinedStatus(self._requester, headers, data, completed=True)
@property
def _identity(self):
return self.sha
def _initAttributes(self):
self._author = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commit = github.GithubObject.NotSet
self._committer = github.GithubObject.NotSet
self._files = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._parents = github.GithubObject.NotSet
self._sha = github.GithubObject.NotSet
self._stats = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "author" in attributes: # pragma no branch
self._author = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["author"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commit" in attributes: # pragma no branch
self._commit = self._makeClassAttribute(github.GitCommit.GitCommit, attributes["commit"])
if "committer" in attributes: # pragma no branch
self._committer = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["committer"])
if "files" in attributes: # pragma no branch
self._files = self._makeListOfClassesAttribute(github.File.File, attributes["files"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "parents" in attributes: # pragma no branch
self._parents = self._makeListOfClassesAttribute(Commit, attributes["parents"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "stats" in attributes: # pragma no branch
self._stats = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["stats"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
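# Hedged usage sketch (illustrative only, not part of PyGithub): given a Commit
# obtained elsewhere (for example from a repository's get_commit(sha)), the
# methods above can post a CI status and read back the combined result.
def _example_report_ci_status(commit, build_url):
    commit.create_status(
        state="success",
        target_url=build_url,
        description="Build passed",
        context="ci/example",
    )
    return commit.get_combined_status()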
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for MOPAC output files"""
# Based on parser in RMG-Py by Greg Magoon
# https://github.com/ReactionMechanismGenerator/RMG-Py/blob/master/external/cclib/parser/mopacparser.py
# Also parts from Ben Albrecht
# https://github.com/ben-albrecht/cclib/blob/master/cclib/parser/mopacparser.py
# Merged and modernized by Geoff Hutchison
import re
import math
import numpy
from cclib.parser import data
from cclib.parser import logfileparser
from cclib.parser import utils
def symbol2int(symbol):
t = utils.PeriodicTable()
return t.number[symbol]
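# Illustrative only: utils.PeriodicTable maps element symbols to atomic
# numbers, so symbol2int('C') == 6 and symbol2int('O') == 8.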
class MOPAC(logfileparser.Logfile):
"""A MOPAC20XX output file."""
def __init__(self, *args, **kwargs):
super().__init__(logname="MOPAC", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "MOPAC log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'MOPAC("%s")' % (self.filename)
def normalisesym(self, label):
"""MOPAC does not require normalizing symmetry labels."""
return label
def before_parsing(self):
#TODO
# Defaults
charge = 0
self.set_attribute('charge', charge)
mult = 1
self.set_attribute('mult', mult)
# Keep track of whether or not we're performing an
# (un)restricted calculation.
self.unrestricted = False
self.is_rohf = False
# Keep track of 1SCF vs. gopt since gopt is default
self.onescf = False
self.geomdone = False
# Compile the dashes-and-or-spaces-only regex.
self.re_dashes_and_spaces = re.compile(r'^[\s-]+$')
self.star = ' * '
self.stars = ' *******************************************************************************'
self.spinstate = {'SINGLET': 1,
'DOUBLET': 2,
'TRIPLET': 3,
'QUARTET': 4,
'QUINTET': 5,
'SEXTET': 6,
'HEPTET': 7,
'OCTET': 8,
'NONET': 9}
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# Extract the package version.
if "For non-commercial use only" in line:
            # Ignore the platform information for now (the last character).
self.metadata["package_version"] = line.split()[8][:-1]
# Use the year as the legacy (short) package version.
self.skip_lines(
inputfile, ["Stewart Computational Chemistry", "s", "s", "s", "s"]
)
self.metadata["legacy_package_version"] = next(inputfile).split()[1][5:]
# Extract the atomic numbers and coordinates from the optimized geometry
# note that cartesian coordinates section occurs multiple times in the file, and we want to end up using the last instance
# also, note that the section labeled cartesian coordinates doesn't have as many decimal places as the one used here
# Example 1 (not used):
# CARTESIAN COORDINATES
#
# NO. ATOM X Y Z
#
# 1 O 4.7928 -0.8461 0.3641
# 2 O 5.8977 -0.3171 0.0092
# ...
# Example 2 (used):
# ATOM CHEMICAL X Y Z
# NUMBER SYMBOL (ANGSTROMS) (ANGSTROMS) (ANGSTROMS)
#
# 1 O 4.79280259 * -0.84610232 * 0.36409474 *
# 2 O 5.89768035 * -0.31706418 * 0.00917035 *
# ... etc.
if line.split() == ["NUMBER", "SYMBOL", "(ANGSTROMS)", "(ANGSTROMS)", "(ANGSTROMS)"]:
self.updateprogress(inputfile, "Attributes", self.cupdate)
self.inputcoords = []
self.inputatoms = []
            blankline = next(inputfile)
            atomcoords = []
            line = next(inputfile)
while len(line.split()) > 6:
                # MOPAC Version 14.019L 64BITS can append the "CARTESIAN
                # COORDINATES" block directly after this one with no blank line.
tokens = line.split()
self.inputatoms.append(symbol2int(tokens[1]))
xc = float(tokens[2])
yc = float(tokens[4])
zc = float(tokens[6])
atomcoords.append([xc, yc, zc])
                line = next(inputfile)
self.inputcoords.append(atomcoords)
if not hasattr(self, "natom"):
self.atomnos = numpy.array(self.inputatoms, 'i')
self.natom = len(self.atomnos)
if 'CHARGE ON SYSTEM =' in line:
charge = int(line.split()[5])
self.set_attribute('charge', charge)
if 'SPIN STATE DEFINED' in line:
# find the multiplicity from the line token (SINGLET, DOUBLET, TRIPLET, etc)
mult = self.spinstate[line.split()[1]]
self.set_attribute('mult', mult)
# Read energy (in kcal/mol, converted to eV)
#
# FINAL HEAT OF FORMATION = -333.88606 KCAL = -1396.97927 KJ
if 'FINAL HEAT OF FORMATION =' in line:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
self.scfenergies.append(utils.convertor(utils.float(line.split()[5]), "kcal/mol", "eV"))
# Molecular mass parsing (units will be amu)
#
# MOLECULAR WEIGHT == 130.1890
if line[0:35] == ' MOLECULAR WEIGHT =':
self.molmass = utils.float(line.split()[3])
#rotational constants
#Example:
# ROTATIONAL CONSTANTS IN CM(-1)
#
# A = 0.01757641 B = 0.00739763 C = 0.00712013
# could also read in moment of inertia, but this should just differ by a constant: rot cons= h/(8*Pi^2*I)
        # note that the last occurrence of this in the thermochemistry section has reduced precision,
# so we will want to use the 2nd to last instance
if line[0:40] == ' ROTATIONAL CONSTANTS IN CM(-1)':
            blankline = next(inputfile)
            rotinfo = next(inputfile)
if not hasattr(self, "rotcons"):
self.rotcons = []
broken = rotinfo.split()
            # keep the rotational constants as parsed (cm^-1)
a = float(broken[2])
b = float(broken[5])
c = float(broken[8])
self.rotcons.append([a, b, c])
# Start of the IR/Raman frequency section.
# Example:
# VIBRATION 1 1A ATOM PAIR ENERGY CONTRIBUTION RADIAL
# FREQ. 15.08 C 12 -- C 16 +7.9% (999.0%) 0.0%
# T-DIPOLE 0.2028 C 16 -- H 34 +5.8% (999.0%) 28.0%
# TRAVEL 0.0240 C 16 -- H 32 +5.6% (999.0%) 35.0%
# RED. MASS 1.7712 O 1 -- O 4 +5.2% (999.0%) 0.4%
# EFF. MASS7752.8338
#
# VIBRATION 2 2A ATOM PAIR ENERGY CONTRIBUTION RADIAL
# FREQ. 42.22 C 11 -- C 15 +9.0% (985.8%) 0.0%
# T-DIPOLE 0.1675 C 15 -- H 31 +6.6% (843.6%) 3.3%
# TRAVEL 0.0359 C 15 -- H 29 +6.0% (802.8%) 24.5%
# RED. MASS 1.7417 C 13 -- C 17 +5.8% (792.7%) 0.0%
# EFF. MASS1242.2114
if line[1:10] == 'VIBRATION':
self.updateprogress(inputfile, "Frequency Information", self.fupdate)
# get the vib symmetry
if len(line.split()) >= 3:
sym = line.split()[2]
if not hasattr(self, 'vibsyms'):
self.vibsyms = []
self.vibsyms.append(sym)
            line = next(inputfile)
if 'FREQ' in line:
if not hasattr(self, 'vibfreqs'):
self.vibfreqs = []
freq = float(line.split()[1])
self.vibfreqs.append(freq)
            line = next(inputfile)
if 'T-DIPOLE' in line:
if not hasattr(self, 'vibirs'):
self.vibirs = []
tdipole = float(line.split()[1])
# transform to km/mol
self.vibirs.append(math.sqrt(tdipole))
            line = next(inputfile)
if 'TRAVEL' in line:
pass
            line = next(inputfile)
if 'RED. MASS' in line:
if not hasattr(self, 'vibrmasses'):
self.vibrmasses = []
rmass = float(line.split()[2])
self.vibrmasses.append(rmass)
# Orbital eigenvalues, e.g.
# ALPHA EIGENVALUES
# BETA EIGENVALUES
# or just "EIGENVALUES" for closed-shell
if 'EIGENVALUES' in line:
if not hasattr(self, 'moenergies'):
self.moenergies = [] # list of arrays
energies = []
            line = next(inputfile)
while len(line.split()) > 0:
energies.extend([float(i) for i in line.split()])
                line = next(inputfile)
self.moenergies.append(energies)
# todo:
# Partial charges and dipole moments
# Example:
# NET ATOMIC CHARGES
if line[:16] == '== MOPAC DONE ==':
self.metadata['success'] = True
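# Hedged usage sketch (illustrative only, not part of the parser): cclib's
# generic reader dispatches to this class for MOPAC output files and exposes
# the attributes populated above (scfenergies, vibfreqs, rotcons, ...). The
# file name is a placeholder.
def _example_read_mopac(path="mopac_calc.out"):
    from cclib.io import ccread
    data = ccread(path)
    return getattr(data, "scfenergies", None), getattr(data, "vibfreqs", None)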
|
|
from __future__ import print_function, unicode_literals
import re
from decimal import Decimal as D
from aspen import Response
import pytest
from gratipay.security.user import SESSION
from gratipay.testing import Harness
from gratipay.wireup import find_files
overescaping_re = re.compile(r'&(#[0-9]{4}|[a-z]+);')
class TestPages(Harness):
def browse(self, setup=None, **kw):
alice = self.make_participant('alice', claimed_time='now', number='plural')
exchange_id = self.make_exchange('balanced-cc', 19, 0, alice)
alice.insert_into_communities(True, 'Wonderland', 'wonderland')
alan = self.make_participant('alan', claimed_time='now')
alice.add_member(alan)
if setup:
setup(alice)
i = len(self.client.www_root)
urls = []
for spt in find_files(self.client.www_root, '*.spt'):
url = spt[i:-4].replace('/%team/', '/alice/') \
.replace('/alice/%sub', '/alice/foo') \
.replace('/~/%username/', '/~alice/') \
.replace('/for/%slug/', '/for/wonderland/') \
.replace('/%platform/', '/github/') \
.replace('/%user_name/', '/gratipay/') \
.replace('/%membername', '/alan') \
.replace('/%exchange_id.int', '/%s' % exchange_id) \
.replace('/%redirect_to', '/giving') \
.replace('/%endpoint', '/public') \
.replace('/about/me/%sub', '/about/me')
assert '/%' not in url
if 'index' in url.split('/')[-1]:
url = url.rsplit('/', 1)[0] + '/'
urls.append(url)
urls.extend("""
/about/me
/about/me/
/about/me/history
""".split())
for url in urls:
try:
r = self.client.GET(url, **kw)
except Response as r:
if r.code == 404 or r.code >= 500:
raise
assert r.code != 404
assert r.code < 500
assert not overescaping_re.search(r.body.decode('utf8'))
def test_anon_can_browse(self):
self.browse()
def test_new_participant_can_browse(self):
self.browse(auth_as='alice')
def test_on_the_fence_can_browse(self):
def setup(alice):
bob = self.make_participant('bob', claimed_time='now', last_bill_result='')
bob.set_tip_to(alice, D('1.00'))
self.browse(setup, auth_as='alice')
def test_escaping_on_homepage(self):
self.make_participant('alice', claimed_time='now')
expected = "<a href='/alice/'>"
actual = self.client.GET('/', auth_as='alice').body
assert expected in actual
@pytest.mark.xfail(reason="migrating to Teams; #3399")
def test_username_is_in_button(self):
self.make_participant('alice', claimed_time='now')
self.make_participant('bob', claimed_time='now')
body = self.client.GET('/~alice/', auth_as='bob').body
assert '<span class="zero">Give to alice</span>' in body
@pytest.mark.xfail(reason="migrating to Teams; #3399")
def test_username_is_in_unauth_giving_cta(self):
self.make_participant('alice', claimed_time='now')
body = self.client.GET('/~alice/').body
assert 'give to alice' in body
def test_widget(self):
self.make_participant('cheese', claimed_time='now')
expected = "javascript: window.open"
actual = self.client.GET('/~cheese/widget.html').body
assert expected in actual
def test_github_associate(self):
assert self.client.GxT('/on/github/associate').code == 400
def test_twitter_associate(self):
assert self.client.GxT('/on/twitter/associate').code == 400
def test_about(self):
expected = "give money every week"
actual = self.client.GET('/about/').body
assert expected in actual
def test_about_stats(self):
expected = "have joined Gratipay"
actual = self.client.GET('/about/stats.html').body
assert expected in actual
def test_about_charts(self):
assert self.client.GxT('/about/charts.html').code == 302
def test_about_faq(self):
expected = "What is Gratipay?"
actual = self.client.GET('/about/faq.html').body.decode('utf8')
assert expected in actual
def test_about_teams_redirect(self):
assert self.client.GxT('/about/teams/').code == 302
def test_about_teams(self):
expected = "Teams"
actual = self.client.GET('/about/features/teams/').body.decode('utf8')
assert expected in actual
def test_404(self):
response = self.client.GET('/about/four-oh-four.html', raise_immediately=False)
assert "Not Found" in response.body
assert "{%" not in response.body
def test_for_contributors_redirects_to_inside_gratipay(self):
loc = self.client.GxT('/for/contributors/').headers['Location']
assert loc == 'http://inside.gratipay.com/'
def test_mission_statement_also_redirects(self):
assert self.client.GxT('/for/contributors/mission-statement.html').code == 302
def test_anonymous_sign_out_redirects(self):
response = self.client.PxST('/sign-out.html')
assert response.code == 302
assert response.headers['Location'] == '/'
def test_sign_out_overwrites_session_cookie(self):
self.make_participant('alice')
response = self.client.PxST('/sign-out.html', auth_as='alice')
assert response.code == 302
assert response.headers.cookie[SESSION].value == ''
def test_sign_out_doesnt_redirect_xhr(self):
self.make_participant('alice')
response = self.client.PxST('/sign-out.html', auth_as='alice',
HTTP_X_REQUESTED_WITH=b'XMLHttpRequest')
assert response.code == 200
def test_settings_page_available_balance(self):
self.make_participant('alice', claimed_time='now')
self.db.run("UPDATE participants SET balance = 123.00 WHERE username = 'alice'")
actual = self.client.GET("/~alice/settings/", auth_as="alice").body
expected = "123"
assert expected in actual
def test_subscriptions_page(self):
self.make_team(is_approved=True)
alice = self.make_participant('alice', claimed_time='now')
alice.set_subscription_to('TheATeam', "1.00")
assert "The A Team" in self.client.GET("/~alice/subscriptions/", auth_as="alice").body
def test_giving_page_shows_cancelled(self):
self.make_team(is_approved=True)
alice = self.make_participant('alice', claimed_time='now')
alice.set_subscription_to('TheATeam', "1.00")
alice.set_subscription_to('TheATeam', "0.00")
assert "Cancelled" in self.client.GET("/~alice/subscriptions/", auth_as="alice").body
def test_new_participant_can_edit_profile(self):
self.make_participant('alice', claimed_time='now')
body = self.client.GET("/~alice/", auth_as="alice").body
assert b'Edit' in body
def test_tilde_slash_redirects_to_tilde(self):
self.make_participant('alice', claimed_time='now')
response = self.client.GxT("/~/alice/", auth_as="alice")
assert response.code == 302
assert response.headers['Location'] == '/~alice/'
def test_tilde_slash_redirects_subpages_with_querystring_to_tilde(self):
self.make_participant('alice', claimed_time='now')
response = self.client.GxT("/~/alice/foo/bar?baz=buz", auth_as="alice")
assert response.code == 302
assert response.headers['Location'] == '/~alice/foo/bar?baz=buz'
def test_username_redirected_to_tilde(self):
self.make_participant('alice', claimed_time='now')
response = self.client.GxT("/alice/", auth_as="alice")
assert response.code == 302
assert response.headers['Location'] == '/~alice/'
def test_username_redirects_everything_to_tilde(self):
self.make_participant('alice', claimed_time='now')
response = self.client.GxT("/alice/foo/bar?baz=buz", auth_as="alice")
assert response.code == 302
assert response.headers['Location'] == '/~alice/foo/bar?baz=buz'
def test_team_slug__not__redirected_from_tilde(self):
self.make_team(is_approved=True)
assert self.client.GET("/TheATeam/").code == 200
assert self.client.GxT("/~TheATeam/").code == 404
|
|
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.db import models
from django.db.models.fields import CharField, TextField
from django.forms import Textarea, ModelForm
from import_export.admin import ImportExportModelAdmin
from solo.admin import SingletonModelAdmin
from .models import Audiologist
from .models import AudiologistResource
from .models import Client
from .models import ClientResource
from .models import MeetingLog
from .models import MeetingLogResource
from .models import Provider
from .models import ProviderResource
from .models import IncomeSource
from .models import Settings
from .models import Grantor
from .models import GrantorResource
standard_textarea = Textarea(attrs={'rows': 3,
'cols': 40,
'style': 'height: 3.6em;'})
class DeleteNotAllowedModelAdmin(admin.ModelAdmin):
def has_delete_permission(self, request, obj=None):
return request.user.is_superuser
class AudiologistCurrentFilter(SimpleListFilter):
'''
Custom filter that defaults to "current" == True
'''
title = 'Status'
parameter_name = 'current'
def lookups(self, request, model_admin):
return (
('a', 'All audiologists'),
('y', 'Current'),
('n', 'Inactive'),
)
def queryset(self, request, queryset):
if self.value() == 'a':
return queryset.filter()
url_val_map = {
'y': True,
'n': False,
None: True,
}
val = url_val_map[self.value()]
return queryset.filter(current=val)
def choices(self, cl, *a, **kw):
yield {
            'selected': self.value() is None or self.value() == 'y',
'query_string': cl.get_query_string({}, [self.parameter_name]),
'display': 'Current',
}
yield {
'selected': self.value() == 'n',
'query_string': cl.get_query_string({self.parameter_name: 'n'}, []),
'display': 'Inactive',
}
yield {
'selected': self.value() == 'a',
'query_string': cl.get_query_string({self.parameter_name: 'a'}, []),
'display': 'All',
}
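# Illustrative note on the default above (not part of the admin): with no
# "current" parameter in the changelist URL, queryset() maps None -> True via
# url_val_map, so only current audiologists are listed until "All" or
# "Inactive" is selected.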
class AudiologistAdmin(DeleteNotAllowedModelAdmin, ImportExportModelAdmin):
list_display = ('name', 'allowed', 'current')
list_filter = (AudiologistCurrentFilter,)
ordering = ('name',)
resource_class = AudiologistResource
formfield_overrides = {
models.TextField: {
'widget': standard_textarea,
},
}
class ClientIncomeInlineAdmin(admin.TabularInline):
model = IncomeSource
can_delete = True
extra = 1
class MeetingLogInlineAdminForm(ModelForm):
class Meta:
model = MeetingLog
fields = '__all__'
widgets = {
'results': standard_textarea,
}
class MeetingLogInlineAdmin(admin.TabularInline):
model = MeetingLog
form = MeetingLogInlineAdminForm
can_delete = True
extra = 1
class DateYesNoFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('y', 'Yes'),
('n', 'No'),
)
def queryset(self, request, queryset):
query = {}
if self.value() == 'y':
query = {self.field_name + '__isnull': False}
elif self.value() == 'n':
query = {self.field_name + '__isnull': True}
return queryset.filter(**query)
class DeceasedFilter(DateYesNoFilter):
title = 'Deceased'
parameter_name = 'deceased'
field_name = 'date_of_death'
class CostShareApprovedFilter(DateYesNoFilter):
title = 'Cost Share Approved'
parameter_name = 'cost_share_approved'
field_name = 'cost_share_approval'
class UpdateMeetingFilter(DateYesNoFilter):
title = 'Had Update Meeting'
parameter_name = 'update_meeting'
field_name = 'update_meeting'
class ProviderAuthReqFilter(DateYesNoFilter):
title = 'Provider Auth Requested'
parameter_name = 'provider_auth_requested'
field_name = 'provider_auth_requested'
class ProviderAuthRecvFilter(DateYesNoFilter):
title = 'Provider Auth Rcvd'
parameter_name = 'provider_auth_received'
field_name = 'provider_auth_received'
class AudiologistReferredFilter(DateYesNoFilter):
title = 'Audiologist Referred'
parameter_name = 'audiologist_referral_date'
field_name = 'audiologist_referral_date'
class AudiologistApptFilter(DateYesNoFilter):
title = 'Audiologist Appt Set'
parameter_name = 'audiologist_appointment_date'
field_name = 'audiologist_appointment_date'
class AudiologistInvoicedFilter(DateYesNoFilter):
title = 'Audiologist Invoiced'
parameter_name = 'audiologist_invoiced_date'
field_name = 'audiologist_invoiced_date'
class ClientAdmin(ImportExportModelAdmin):
resource_class = ClientResource
list_display = ('last_name', 'first_name', 'intake_date', 'last_updated', 'hearing_loss', 'audiologist', 'client_grantors', 'cost_share', 'cost_share_approval')
list_display_links = ('last_name', 'first_name',)
list_filter = ('provider', 'audiologist', 'grantors', 'family_size', 'hearing_loss',
DeceasedFilter, CostShareApprovedFilter, UpdateMeetingFilter, 'update_meeting',
ProviderAuthReqFilter, ProviderAuthRecvFilter,
AudiologistReferredFilter, AudiologistApptFilter, AudiologistInvoicedFilter,
'equipment_requested', 'adaptive_equipment', 'hearing_aid_assistance',
'last_updated',
'quota_client', 'deliverable', 'non_kcsm',
'intake_staff', 'data_entry_staff')
ordering = ('-intake_date',)
date_hierarchy = 'intake_date'
search_fields = [f.name for f in Client._meta.local_fields if isinstance(f, (CharField, TextField))]
formfield_overrides = {
models.TextField: {
'widget': standard_textarea,
},
}
inlines = (ClientIncomeInlineAdmin,MeetingLogInlineAdmin)
readonly_fields = ('id', 'last_updated')
fieldsets = (
(None, {
'fields': (
'id', 'napis_id',
)
}),
('Personal Info', {
'fields': (
'first_name', 'last_name', 'gender', 'date_of_birth', 'date_of_death',
'is_veteran', 'lives_alone', 'spouse', 'family_size',
)
}),
('Contact', {
'fields': (
'address', 'city', 'county', 'state', 'zip_code', 'deliverable',
'email', 'phone',
'emergency_contact',
'emergency_phone',
)
}),
('Notes', {
'fields': (
'notes',
)
}),
('Demographics', {
'fields': (
'race', 'is_hispanic',
'multiracial', 'multiracial_white', 'multiracial_black', 'multiracial_asian', 'multiracial_amind',
)
}),
('Assistance', {
'fields': (
'hearing_loss', 'aids_requested_left', 'aids_requested_right', 'equipment_requested',
'hearing_assistance', 'adaptive_equipment', 'hearing_aid_assistance',
'equipment_borrowed',
)
}),
('Additional Forms', {
'fields': (
'proof_of_age', 'signed_client_intake', 'signed_disclosure_authorization',
'signed_confidentiality_policy', 'signed_gross_annual_income',
'signed_client_responsibility_fees'
)
}),
('DHHS', {
'fields': (
'intake_date', 'intake_staff', 'data_entry_staff', 'last_updated', 'referrer',
'update_meeting',
'cost_share_approval', 'cost_share',
'quota_client', 'non_kcsm', 'grantors',
'provider', 'audient_id', 'provider_auth_requested', 'provider_auth_received',
)
}),
('Audiologist', {
'fields': (
'audiologist', 'audiologist_referral_date', 'audiologist_appointment_date',
'audiologist_invoiced_date', 'audiologist_invoiced_amount',
)
}),
)
class MeetingLogAdmin(ImportExportModelAdmin):
resource_class = MeetingLogResource
list_display = ('client', 'contact_date', 'consultation_time', 'paperwork_time', 'units', 'results', 'user')
list_display_links = ('contact_date',)
list_filter = ('client', 'contact_date', 'user')
ordering = ('-contact_date',)
date_hierarchy = 'contact_date'
formfield_overrides = {
models.TextField: {
'widget': standard_textarea,
},
}
def units(self, obj):
return (obj.consultation_time + obj.paperwork_time) / 60
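# Hedged worked example for the "units" column above (illustrative only,
# assuming the stored times are minutes): 90 minutes of consultation plus 30
# minutes of paperwork reports as (90 + 30) / 60 == 2.0 units.
def _example_units(consultation_minutes=90, paperwork_minutes=30):
    return (consultation_minutes + paperwork_minutes) / 60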
class ProviderAdmin(ImportExportModelAdmin):
ordering = ('name',)
resource_class = ProviderResource
formfield_overrides = {
models.TextField: {
'widget': standard_textarea,
},
}
class GrantorAdmin(ImportExportModelAdmin):
ordering = ('name',)
resource_class = GrantorResource
formfield_overrides = {
models.TextField: {
'widget': standard_textarea,
},
}
admin.site.disable_action('delete_selected')
admin.site.site_header = 'Deaf & Hard of Hearing Services - ADAPT'
admin.site.site_title = 'ADAPT'
admin.site.site_url = None
admin.site.index_title = ''
admin.site.register(Audiologist, AudiologistAdmin)
admin.site.register(Client, ClientAdmin)
admin.site.register(Provider, ProviderAdmin)
admin.site.register(Grantor, GrantorAdmin)
admin.site.register(MeetingLog, MeetingLogAdmin)
admin.site.register(Settings, SingletonModelAdmin)
|
|
"""
Tests for both experiment.py and experiment_set.py
"""
import pytest
from snovault import TYPES
# from snovault.storage import UUID
from uuid import uuid4
from ..types.experiment import ExperimentHiC
pytestmark = [pytest.mark.setone, pytest.mark.working]
@pytest.fixture
def custom_experiment_set_data(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'description': 'test experiment set',
'experimentset_type': 'custom',
'status': 'in review by lab'
}
@pytest.fixture
def custom_experiment_set(testapp, custom_experiment_set_data):
return testapp.post_json('/experiment_set', custom_experiment_set_data).json['@graph'][0]
@pytest.fixture
def replicate_experiment_set_data(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'description': 'test replicate set',
'experimentset_type': 'replicate',
'status': 'in review by lab'
}
@pytest.fixture
def replicate_experiment_set(testapp, replicate_experiment_set_data):
return testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
@pytest.fixture
def sop_map_data(protocol, lab, award):
return {
"sop_name": "in situ Hi-C SOP map",
"sop_version": 1,
'lab': lab['@id'],
'award': award['@id'],
"associated_item_type": "ExperimentHiC",
"id_values": ["in situ Hi-C"],
"notes": "This is just a dummy insert not linked to true SOP protocol",
"description": "Fields with specified defaults in the SOP for in situ Hi-C experiments as per ??",
"sop_protocol": protocol['@id'],
"fields_with_default": [
{"field_name": "digestion_enzyme", "field_value": "MboI"},
]
}
@pytest.fixture
def sop_map_data_2(lab, award):
return {
"sop_name": "Second in situ hic map",
"sop_version": 2,
'lab': lab['@id'],
'award': award['@id'],
"associated_item_type": "ExperimentHiC",
"id_values": ["in situ Hi-C"],
"notes": "This is a dummy second version of map",
"description": "Second",
}
def test_experiment_update_experiment_relation(testapp, base_experiment, experiment):
relation = [{'relationship_type': 'controlled by',
'experiment': experiment['@id']}]
res = testapp.patch_json(base_experiment['@id'], {'experiment_relation': relation})
assert res.json['@graph'][0]['experiment_relation'] == relation
    # patching an experiment should also update the related experiment
exp_res = testapp.get(experiment['@id'])
exp_res_id = exp_res.json['experiment_relation'][0]['experiment']['@id']
assert exp_res_id == base_experiment['@id']
def test_experiment_update_hic_sop_mapping_added_on_submit(testapp, experiment_data, sop_map_data):
res_sop = testapp.post_json('/sop_map', sop_map_data, status=201)
res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
assert 'sop_mapping' in res_exp.json['@graph'][0]
assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes"
assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res_sop.json['@graph'][0]['@id']
def test_experiment_update_hic_sop_mapping_has_map_is_no(testapp, experiment_data, exp_types):
experiment_data['experiment_type'] = exp_types['dnase']['@id']
res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
assert 'sop_mapping' in res_exp.json['@graph'][0]
assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "No"
def test_experiment_update_hic_sop_mapping_has_sop2no_when_only_sopmap_deleted(
testapp, experiment_data, sop_map_data):
sop_map_data['status'] = 'deleted'
testapp.post_json('/sop_map', sop_map_data, status=201)
res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
assert 'sop_mapping' in res_exp.json['@graph'][0]
assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "No"
def test_experiment_update_hic_sop_mapping_to_v2_when_2_versions(
testapp, experiment_data, sop_map_data, sop_map_data_2):
testapp.post_json('/sop_map', sop_map_data, status=201)
res2chk = testapp.post_json('/sop_map', sop_map_data_2, status=201)
res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
assert 'sop_mapping' in res_exp.json['@graph'][0]
assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes"
assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res2chk.json['@graph'][0]['@id']
def test_experiment_update_hic_sop_mapping_to_v1_when_v2_deleted(
testapp, experiment_data, sop_map_data, sop_map_data_2):
res2chk = testapp.post_json('/sop_map', sop_map_data, status=201)
sop_map_data_2['status'] = 'deleted'
testapp.post_json('/sop_map', sop_map_data_2, status=201)
res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
assert 'sop_mapping' in res_exp.json['@graph'][0]
assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes"
assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res2chk.json['@graph'][0]['@id']
def test_experiment_update_hic_sop_map_not_added_when_already_present(testapp, experiment_data):
experiment_data['sop_mapping'] = {}
experiment_data['sop_mapping']['has_sop'] = 'No'
res = testapp.post_json('/experiment_hi_c', experiment_data)
assert 'sop_mapping' in res.json['@graph'][0]
assert res.json['@graph'][0]['sop_mapping']['has_sop'] == "No"
assert 'sop_map' not in res.json['@graph'][0]['sop_mapping']
def test_calculated_experiment_summary(testapp, experiment, mboI):
summary = 'in situ Hi-C on GM12878 with MboI'
res = testapp.patch_json(experiment['@id'], {'digestion_enzyme': mboI['@id']}, status=200)
assert res.json['@graph'][0]['experiment_summary'] == summary
assert summary in res.json['@graph'][0]['display_title']
def test_experiment_summary_repliseq(repliseq_4):
assert repliseq_4.get('experiment_summary') == '2-stage Repli-seq on GM12878 S-phase early'
# test for experiment_set_replicate _update function
def test_experiment_set_replicate_update_adds_experiments_in_set(testapp, experiment, replicate_experiment_set):
assert not replicate_experiment_set['experiments_in_set']
res = testapp.patch_json(
replicate_experiment_set['@id'],
{'replicate_exps':
[{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]},
status=200)
assert experiment['@id'] in res.json['@graph'][0]['experiments_in_set']
# test for default_embedding practice with embedded list
# this test should change if any of the reference embeds below are altered
def test_experiment_set_default_embedded_list(registry, exp_types):
exp_data = {
'experiment_type': exp_types['microc']['uuid'],
'status': 'in review by lab'
}
# create experimentHiC obj; _update (and by extension, add_default_embeds)
# are called automatically
test_exp = ExperimentHiC.create(registry, None, exp_data)
# call reify embedded property (defined in snovault/resources.py)
embedded = test_exp.embedded
embedded_list = test_exp.embedded_list
type_info_embedded = registry[TYPES]['experiment_hi_c'].embedded_list
assert type_info_embedded == embedded_list
if 'produced_in_pub.*' in embedded_list:
assert 'produced_in_pub.*' in embedded
assert 'produced_in_pub.award.@id' in embedded
assert 'produced_in_pub.award.@type' in embedded
assert 'produced_in_pub.award.principals_allowed.*' in embedded
assert 'produced_in_pub.award.display_title' in embedded
assert 'produced_in_pub.award.uuid' in embedded
assert 'experiment_sets.accession' in embedded_list
assert 'experiment_sets.@id' in embedded
assert 'experiment_sets.@type' in embedded
assert 'experiment_sets.principals_allowed.*' in embedded
assert 'experiment_sets.display_title' in embedded
assert 'experiment_sets.uuid' in embedded
# tests for the experiment_sets calculated properties
def test_calculated_experiment_sets_for_custom_experiment_set(testapp, experiment, custom_experiment_set):
assert len(experiment['experiment_sets']) == 0
res = testapp.patch_json(custom_experiment_set['@id'], {'experiments_in_set': [experiment['@id']]}, status=200)
expt_res = testapp.get(experiment['@id'])
assert custom_experiment_set['uuid'] == expt_res.json['experiment_sets'][0]['uuid']
def test_calculated_experiment_sets_for_replicate_experiment_set(testapp, experiment, replicate_experiment_set):
assert len(experiment['experiment_sets']) == 0
res = testapp.patch_json(
replicate_experiment_set['@id'],
{'replicate_exps':
[{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]},
status=200)
expt_res = testapp.get(experiment['@id'])
assert replicate_experiment_set['uuid'] == expt_res.json['experiment_sets'][0]['uuid']
@pytest.fixture
def pub1_data(lab, award):
    # ENCODE paper published 2012-09-06
return {
'award': award['@id'],
'lab': lab['@id'],
'ID': "PMID:22955616"
}
@pytest.fixture
def pub2_data(lab, award):
    # Sanborn et al. paper published 2015-11-24
return {
'award': award['@id'],
'lab': lab['@id'],
'ID': "PMID:26499245"
}
def test_calculated_produced_in_pub_for_rep_experiment_set(testapp, replicate_experiment_set, pub1_data):
# post single rep_exp_set to single pub
pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
expsetres = testapp.get(replicate_experiment_set['@id'])
assert 'produced_in_pub' in expsetres
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in expsetres.json['produced_in_pub'].values()
def test_calculated_produced_in_pub_for_cust_experiment_set(testapp, custom_experiment_set, pub1_data):
# post single cust_exp_set to single pub
pub1_data['exp_sets_prod_in_pub'] = [custom_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
expsetres = testapp.get(custom_experiment_set['@id'])
assert 'produced_in_pub' in expsetres
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in expsetres.json['produced_in_pub'].values()
def test_calculated_produced_in_pub_for_two_experiment_set_to_one_pub(
testapp, replicate_experiment_set, custom_experiment_set, pub1_data):
# post two exp_set to single pub
pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id'], custom_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
responses = [testapp.get(replicate_experiment_set['@id']),
testapp.get(custom_experiment_set['@id'])]
for response in responses:
assert 'produced_in_pub' in response
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_produced_in_pub_for_two_experiment_set_two_pubs(
testapp, replicate_experiment_set, custom_experiment_set, pub1_data, pub2_data):
# post different exp_set to each pub
pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
pub2_data['exp_sets_prod_in_pub'] = [custom_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
pub2res = testapp.post_json('/publication', pub2_data, status=201)
responses = [testapp.get(replicate_experiment_set['@id']),
testapp.get(custom_experiment_set['@id'])]
for response in responses:
assert 'produced_in_pub' in response
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == responses[0].json['produced_in_pub']['@id']
assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == responses[1].json['produced_in_pub']['@id']
def test_calculated_produced_in_pub_for_one_experiment_set_two_pubs(
testapp, replicate_experiment_set, pub1_data, pub2_data):
# post one exp_set to two pubs - this one should pick up only the most recent pub
pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
pub2_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
pub2res = testapp.post_json('/publication', pub2_data, status=201)
response = testapp.get(replicate_experiment_set['@id'])
assert 'produced_in_pub' in response
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' != response.json['produced_in_pub']['@id']
assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_publications_in_experiment_set_no_data(
testapp, replicate_experiment_set, custom_experiment_set, pub1_data):
pub1res = testapp.post_json('/publication', pub1_data, status=201)
print(replicate_experiment_set)
print(custom_experiment_set)
assert not replicate_experiment_set['publications_of_set']
assert not custom_experiment_set['publications_of_set']
def test_calculated_publications_in_rep_experiment_set_2_fields(
testapp, replicate_experiment_set, pub1_data):
# post single rep_exp_set to single pub both fields
pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
pub1_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
response = testapp.get(replicate_experiment_set['@id'])
print(response)
print('JSON:', response.json)
assert 'publications_of_set' in response
assert len(response.json['publications_of_set']) == 1
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in response.json['publications_of_set'][0].values()
def test_calculated_publications_in_cust_experiment_set_used_in_field(
testapp, custom_experiment_set, pub1_data):
    # post to only the 'used in publication' field - one pub, one exp set
pub1_data['exp_sets_used_in_pub'] = [custom_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
response = testapp.get(custom_experiment_set['@id'])
assert 'publications_of_set' in response
assert len(response.json['publications_of_set']) == 1
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in response.json['publications_of_set'][0].values()
def test_calculated_publications_in_rep_experiment_set_two_pubs_both_fields(
testapp, replicate_experiment_set, pub1_data, pub2_data):
# post same experiment set to two pubs in either field
pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
pub2_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
pub2res = testapp.post_json('/publication', pub2_data, status=201)
response = testapp.get(replicate_experiment_set['@id'])
assert 'publications_of_set' in response
assert len(response.json['publications_of_set']) == 2
publications = response.json['publications_of_set']
combined_pub_vals = [p['@id'] for p in publications]
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
def test_calculated_publications_in_rep_experiment_set_two_pubs_in_used(
testapp, replicate_experiment_set, pub1_data, pub2_data):
# post same experiment set to two pubs in used in pub field
pub1_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
pub2_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
pub2res = testapp.post_json('/publication', pub2_data, status=201)
response = testapp.get(replicate_experiment_set['@id'])
assert 'publications_of_set' in response
assert len(response.json['publications_of_set']) == 2
publications = response.json['publications_of_set']
combined_pub_vals = list(publications[0].values()) + list(publications[1].values())
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
# experiment pub calculated properties tests
@pytest.fixture
def repset_w_exp1(testapp, replicate_experiment_set_data, experiment):
repset = replicate_experiment_set_data
repset['replicate_exps'] = [{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]
return testapp.post_json('/experiment_set_replicate', repset).json['@graph'][0]
@pytest.fixture
def experiment2(testapp, experiment_data, exp_types):
experiment_data['experiment_type'] = exp_types['capc']['@id']
return testapp.post_json('/experiment_capture_c', experiment_data).json['@graph'][0]
@pytest.fixture
def custset_w_exp1(testapp, custom_experiment_set_data, experiment):
custset = custom_experiment_set_data
custset['experiments_in_set'] = [experiment['@id']]
return testapp.post_json('/experiment_set', custset).json['@graph'][0]
@pytest.fixture
def custset_w_exp2(testapp, custom_experiment_set_data, experiment2):
custset = custom_experiment_set_data
custset['experiments_in_set'] = [experiment2['@id']]
return testapp.post_json('/experiment_set', custset).json['@graph'][0]
def test_calculated_expt_produced_in_pub_for_rep_experiment_set(
testapp, repset_w_exp1, pub1_data):
# post single rep_exp_set to single pub
pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
expres = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
assert 'produced_in_pub' in expres
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == expres.json['produced_in_pub']['@id']
def test_calculated_expt_produced_in_pub_for_expt_w_ref(
testapp, experiment_data, replicate_experiment_set_data, pub2_data, publication):
experiment_data['references'] = [publication['@id']]
# just check experiment by itself first
expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
assert 'produced_in_pub' in expt
assert publication['@id'] == expt['produced_in_pub']
# post repset with this experiment
replicate_experiment_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': expt['@id']}]
repset = testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data, status=201).json['@graph'][0]
# post single rep_exp_set to single pub
pub2_data['exp_sets_prod_in_pub'] = [repset['@id']]
testapp.post_json('/publication', pub2_data, status=201)
expinset = testapp.get(repset['replicate_exps'][0]['replicate_exp']).json
assert 'produced_in_pub' in expinset
assert publication['@id'] == expinset['produced_in_pub']['@id']
def test_calculated_expt_produced_in_pub_for_cust_experiment_set(
testapp, custset_w_exp1, pub1_data):
# post single cust_exp_set to single pub
pub1_data['exp_sets_prod_in_pub'] = [custset_w_exp1['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
expres = testapp.get(custset_w_exp1['experiments_in_set'][0])
assert 'produced_in_pub' not in expres.json.keys()
def test_calculated_expt_produced_in_pub_for_one_expt_in_two_expset_one_pub(
testapp, repset_w_exp1, custset_w_exp1, pub1_data):
# post two exp_set with same experiment (repset and custset) to single pub
pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id'], custset_w_exp1['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
# both responses will get the same experiment
responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']),
testapp.get(custset_w_exp1['experiments_in_set'][0])]
for response in responses:
assert 'produced_in_pub' in response
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_expt_produced_in_pub_for_two_exp_two_expset_two_pubs(
testapp, repset_w_exp1, custset_w_exp2, pub1_data, pub2_data):
# post 2 exp_set (one repset, one custom) each with diff expt to each pub
# only expt in repset should get the pub of repset
pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
pub2_data['exp_sets_prod_in_pub'] = [custset_w_exp2['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
testapp.post_json('/publication', pub2_data, status=201)
responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']),
testapp.get(custset_w_exp2['experiments_in_set'][0])]
assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == responses[0].json['produced_in_pub']['@id']
assert 'produced_in_pub' not in responses[1].json
def test_calculated_expt_produced_in_pub_for_one_expt_one_expset_two_pubs(
testapp, repset_w_exp1, pub1_data, pub2_data):
# post one exp_set to two pubs - this one should pick up only the most recent pub
pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
pub2_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
pub2res = testapp.post_json('/publication', pub2_data, status=201)
response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
assert 'produced_in_pub' in response
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' != response.json['produced_in_pub']['@id']
assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_publications_in_experiment_no_data(
testapp, repset_w_exp1, custset_w_exp2, pub1_data):
pub1res = testapp.post_json('/publication', pub1_data, status=201)
responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']),
testapp.get(custset_w_exp2['experiments_in_set'][0])]
for response in responses:
assert response.json['publications_of_exp'] == []
def test_calculated_publications_in_expt_w_repset_in_both_fields(
testapp, repset_w_exp1, pub1_data):
# post single rep_exp_set to single pub both fields
pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
pub1_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
assert 'publications_of_exp' in response
assert len(response.json['publications_of_exp']) == 1
assert pub1res.json['@graph'][0]['uuid'] == response.json['publications_of_exp'][0]['uuid']
def test_calculated_publications_in_expt_w_custset_used_in_field(
testapp, custset_w_exp2, pub1_data):
    # post to only the 'used in publication' field - one pub, one exp set
pub1_data['exp_sets_used_in_pub'] = [custset_w_exp2['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
response = testapp.get(custset_w_exp2['experiments_in_set'][0])
assert 'publications_of_exp' in response
assert len(response.json['publications_of_exp']) == 1
assert pub1res.json['@graph'][0]['uuid'] == response.json['publications_of_exp'][0]['uuid']
def test_calculated_publications_in_expt_w_repset_two_pubs_both_fields(
testapp, repset_w_exp1, pub1_data, pub2_data):
# post same experiment set to two pubs in either field
pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
pub2_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
pub2res = testapp.post_json('/publication', pub2_data, status=201)
pubuuids = [pub1res.json['@graph'][0]['uuid']]
pubuuids.append(pub2res.json['@graph'][0]['uuid'])
response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
assert 'publications_of_exp' in response
assert len(response.json['publications_of_exp']) == 2
publications = response.json['publications_of_exp']
for pub in publications:
assert pub['uuid'] in pubuuids
def test_calculated_publications_in_expt_w_repset_two_pubs_in_used(
testapp, repset_w_exp1, pub1_data, pub2_data):
# post same experiment set to two pubs in used in pub field
pub1_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
pub2_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
pub1res = testapp.post_json('/publication', pub1_data, status=201)
pub2res = testapp.post_json('/publication', pub2_data, status=201)
pubuuids = [pub1res.json['@graph'][0]['uuid']]
pubuuids.append(pub2res.json['@graph'][0]['uuid'])
response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
assert 'publications_of_exp' in response
assert len(response.json['publications_of_exp']) == 2
publications = response.json['publications_of_exp']
for pub in publications:
assert pub['uuid'] in pubuuids
def test_calculated_no_of_expts_in_set_w_no_exps(empty_replicate_set):
assert 'number_of_experiments' not in empty_replicate_set
def test_calculated_no_of_expts_in_set_w_2_exps(two_experiment_replicate_set):
assert two_experiment_replicate_set['number_of_experiments'] == 2
# tests for category calculated_property
@pytest.fixture
def target_w_prot(testapp, lab, award):
item = {
'description': "Protein target",
'targeted_proteins': ['CTCF (ABCD)'],
'award': award['@id'],
'lab': lab['@id'],
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def exp_w_target_info(lab, award, human_biosample, exp_types,
mboI, genomic_region_bio_feature):
return {
'lab': lab['@id'],
'award': award['@id'],
'biosample': human_biosample['@id'],
'experiment_type': exp_types['capc']['@id'],
'targeted_regions': [{'target': [genomic_region_bio_feature['@id']]}]
}
@pytest.fixture
def expt_w_targ_region(testapp, exp_w_target_info):
return testapp.post_json('/experiment_capture_c', exp_w_target_info).json['@graph'][0]
@pytest.fixture
def expt_w_2_targ_regions(testapp, exp_w_target_info, gene_bio_feature):
region = {'target': [gene_bio_feature['@id']]}
exp_w_target_info['targeted_regions'].append(region)
return testapp.post_json('/experiment_capture_c', exp_w_target_info).json['@graph'][0]
@pytest.fixture
def expt_w_target_data(lab, award, human_biosample,
prot_bio_feature, exp_types):
return {
'lab': lab['@id'],
'award': award['@id'],
'biosample': human_biosample['@id'],
'experiment_type': exp_types['chia']['@id'],
'targeted_factor': [prot_bio_feature['@id']]
}
@pytest.fixture
def expt_w_target(testapp, expt_w_target_data):
return testapp.post_json('/experiment_chiapet', expt_w_target_data).json['@graph'][0]
@pytest.fixture
def chipseq_expt(testapp, expt_w_target_data, exp_types):
expt_w_target_data['experiment_type'] = exp_types['chipseq']['@id']
return testapp.post_json('/experiment_seq', expt_w_target_data).json['@graph'][0]
@pytest.fixture
def tsaseq_expt(testapp, expt_w_target_data, exp_types):
expt_w_target_data['experiment_type'] = exp_types['tsaseq']['@id']
return testapp.post_json('/experiment_tsaseq', expt_w_target_data).json['@graph'][0]
@pytest.fixture
def repliseq_info(lab, award, human_biosample, exp_types):
return {
'lab': lab['@id'],
'award': award['@id'],
'biosample': human_biosample['@id'],
'experiment_type': exp_types['repliseq']['@id'],
}
@pytest.fixture
def repliseq_1(testapp, repliseq_info):
return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def repliseq_2(testapp, repliseq_info):
repliseq_info['stage_fraction'] = 'early'
return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def repliseq_3(testapp, repliseq_info):
repliseq_info['stage_fraction'] = 'early'
repliseq_info['total_fractions_in_exp'] = 16
return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def repliseq_4(testapp, repliseq_info):
repliseq_info['stage_fraction'] = 'early'
repliseq_info['total_fractions_in_exp'] = 2
repliseq_info['cell_cycle_phase'] = 'S'
return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def experiment_atacseq(testapp, repliseq_info, exp_types):
repliseq_info['experiment_type'] = exp_types['atacseq']['@id']
return testapp.post_json('/experiment_atacseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def damid_no_fusion(testapp, repliseq_info, exp_types):
repliseq_info['experiment_type'] = exp_types['dam']['@id']
return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0]
@pytest.fixture
def damid_w_fusion(testapp, repliseq_info, prot_bio_feature, exp_types):
repliseq_info['experiment_type'] = exp_types['dam']['@id']
repliseq_info['targeted_factor'] = [prot_bio_feature['@id']]
return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0]
@pytest.fixture
def damid_w_multifusion(testapp, repliseq_info, prot_bio_feature, gene_bio_feature, exp_types):
repliseq_info['experiment_type'] = exp_types['dam']['@id']
repliseq_info['targeted_factor'] = [prot_bio_feature['@id'], gene_bio_feature['@id']]
return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0]
@pytest.fixture
def basic_info(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
}
@pytest.fixture
def imaging_path_1(testapp, basic_info, genomic_region_bio_feature):
basic_info['target'] = [genomic_region_bio_feature['@id']]
basic_info['labeled_probe'] = 'FITC goat anti rabbit'
return testapp.post_json('/imaging_path', basic_info).json['@graph'][0]
@pytest.fixture
def imaging_path_2(testapp, basic_info, genomic_region_bio_feature):
basic_info['target'] = [genomic_region_bio_feature['@id']]
basic_info['labeled_probe'] = 'TRITC horse anti rabbit'
return testapp.post_json('/imaging_path', basic_info).json['@graph'][0]
@pytest.fixture
def imaging_path_3(testapp, basic_info, basic_region_bio_feature):
basic_info['target'] = [basic_region_bio_feature['@id']]
basic_info['labeled_probe'] = 'DAPI'
return testapp.post_json('/imaging_path', basic_info).json['@graph'][0]
@pytest.fixture
def microscopy_no_path(testapp, repliseq_info, exp_types):
repliseq_info['experiment_type'] = exp_types['fish']['@id']
return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
@pytest.fixture
def microscopy_w_path(testapp, repliseq_info, imaging_path_1, exp_types):
repliseq_info['experiment_type'] = exp_types['fish']['@id']
img_path = {'path': imaging_path_1['@id'], 'channel': 'ch01'}
repliseq_info['imaging_paths'] = [img_path]
return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
@pytest.fixture
def microscopy_w_multipath(testapp, repliseq_info, imaging_path_1, imaging_path_2,
imaging_path_3, exp_types):
repliseq_info['experiment_type'] = exp_types['fish']['@id']
img_path1 = {'path': imaging_path_1['@id'], 'channel': 'ch01'}
img_path2 = {'path': imaging_path_2['@id'], 'channel': 'ch02'}
img_path3 = {'path': imaging_path_3['@id'], 'channel': 'ch03'}
repliseq_info['imaging_paths'] = [img_path1, img_path2, img_path3]
return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
@pytest.fixture
def microscopy_w_splitpath(testapp, repliseq_info, exp_types,
imaging_path_1, imaging_path_3,
basic_region_bio_feature, genomic_region_bio_feature):
    '''Sometimes a (group of) targets is split across different imaging paths,
    e.g. due to multiplexing. If the preferred labels are formatted as below
    ('<count> TADs on chr19'), the split group is detected and replaced with
    the summed count (see the illustrative sketch after this fixture).'''
repliseq_info['experiment_type'] = exp_types['fish']['@id']
img_path1 = {'path': imaging_path_1['@id'], 'channel': 'ch01'}
img_path3 = {'path': imaging_path_3['@id'], 'channel': 'ch03'}
repliseq_info['imaging_paths'] = [img_path1, img_path3]
testapp.patch_json(basic_region_bio_feature['@id'],
{'preferred_label': '15 TADs on chr19'}).json['@graph'][0]
testapp.patch_json(genomic_region_bio_feature['@id'],
{'preferred_label': '22 TADs on chr19'}).json['@graph'][0]
return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
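# Illustrative sketch only (not application code): one plausible way split target labels
# such as '15 TADs on chr19' and '22 TADs on chr19' could be collapsed into the summed
# label '37 TADs on chr19' asserted further below. The regex and grouping here are
# assumptions, not the real calculated-property implementation.
def _sum_split_target_labels(labels):
    import re
    from collections import OrderedDict
    totals = OrderedDict()
    for label in labels:
        match = re.match(r'^(\d+)\s+(.+)$', label)
        if match:  # '<count> <rest>': accumulate counts for identical '<rest>' strings
            rest = match.group(2)
            totals[rest] = (totals.get(rest) or 0) + int(match.group(1))
        else:  # labels without a leading count pass through unchanged
            totals[label] = None
    return [rest if count is None else '{} {}'.format(count, rest)
            for rest, count in totals.items()]
# e.g. _sum_split_target_labels(['15 TADs on chr19', '22 TADs on chr19']) == ['37 TADs on chr19']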
def test_experiment_atacseq_display_title(experiment_atacseq):
assert experiment_atacseq.get('display_title') == 'ATAC-seq on GM12878 - ' + experiment_atacseq.get('accession')
def test_experiment_damid_w_multifusion_display_title(damid_w_multifusion):
assert damid_w_multifusion.get('display_title') == 'DamID-seq with mulitiple DAM fusions on GM12878 - ' + damid_w_multifusion.get('accession')
def test_experiment_chiapet_w_target_display_title(expt_w_target):
assert expt_w_target.get('display_title') == 'ChIA-PET against RAD21 protein on GM12878 - ' + expt_w_target.get('accession')
def test_experiment_chipseq_w_target_display_title(chipseq_expt):
assert chipseq_expt.get('display_title') == 'ChIP-seq against RAD21 protein on GM12878 - ' + chipseq_expt.get('accession')
def test_experiment_tsaseq_display_title(tsaseq_expt):
assert tsaseq_expt.get('display_title') == 'TSA-seq against RAD21 protein on GM12878 - ' + tsaseq_expt.get('accession')
def test_experiment_categorizer_4_mic_no_path(testapp, microscopy_no_path):
assert microscopy_no_path['experiment_categorizer']['field'] == 'Default'
assert microscopy_no_path['experiment_categorizer'].get('value') is None
def test_experiment_categorizer_4_mic_w_path(testapp, microscopy_w_path, genomic_region_bio_feature):
assert microscopy_w_path['experiment_categorizer']['field'] == 'Target'
assert microscopy_w_path['experiment_categorizer']['value'] == genomic_region_bio_feature['display_title']
def test_experiment_categorizer_4_mic_w_multi_path(testapp, microscopy_w_multipath, genomic_region_bio_feature, basic_region_bio_feature):
vals2chk = [genomic_region_bio_feature['display_title'], basic_region_bio_feature['display_title']]
len2chk = len(vals2chk[0]) + len(vals2chk[1]) + 2
assert microscopy_w_multipath['experiment_categorizer']['field'] == 'Target'
value = microscopy_w_multipath['experiment_categorizer']['value']
assert len(value) == len2chk
for v in vals2chk:
assert v in value
def test_experiment_categorizer_4_mic_w_split_path(testapp, microscopy_w_splitpath):
'''Sometimes a (group of) target(s) is split into different imaging paths,
e.g. due to multiplexing. Sum the split targets and return only one string.'''
assert microscopy_w_splitpath['experiment_categorizer']['value'] == '37 TADs on chr19'
def test_experiment_categorizer_4_chiapet_no_fusion(testapp, repliseq_info, exp_types):
repliseq_info['experiment_type'] = exp_types['chia']['@id']
res = testapp.post_json('/experiment_chiapet', repliseq_info).json['@graph'][0]
assert res['experiment_categorizer']['field'] == 'Default'
assert res['experiment_categorizer']['value'] is None
def test_experiment_categorizer_4_damid_no_fusion(testapp, damid_no_fusion):
assert damid_no_fusion['experiment_categorizer']['field'] == 'Target'
assert damid_no_fusion['experiment_categorizer'].get('value') == 'None (Control)'
def test_experiment_categorizer_4_damid_w_fusion(testapp, damid_w_fusion, prot_bio_feature):
assert damid_w_fusion['experiment_categorizer']['field'] == 'Target'
assert damid_w_fusion['experiment_categorizer']['value'] == prot_bio_feature['display_title']
def test_experiment_categorizer_4_repliseq_no_fraction_info(testapp, repliseq_1):
assert repliseq_1['experiment_categorizer']['field'] == 'Default'
assert repliseq_1['experiment_categorizer'].get('value') is None
def test_experiment_categorizer_4_repliseq_only_fraction(testapp, repliseq_2):
wanted = 'early of an unspecified number of fractions'
assert repliseq_2['experiment_categorizer']['field'] == 'Fraction'
assert repliseq_2['experiment_categorizer']['value'] == wanted
def test_experiment_categorizer_4_repliseq_fraction_and_total(testapp, repliseq_3):
wanted = 'early of 16 fractions'
assert repliseq_3['experiment_categorizer']['field'] == 'Fraction'
assert repliseq_3['experiment_categorizer']['value'] == wanted
def test_experiment_categorizer_w_target(testapp, expt_w_target, prot_bio_feature):
assert expt_w_target['experiment_categorizer']['field'] == 'Target'
assert expt_w_target['experiment_categorizer']['value'] == prot_bio_feature['display_title']
def test_experiment_categorizer_w_enzyme(testapp, experiment, mboI):
assert experiment['experiment_categorizer']['field'] == 'Enzyme'
assert experiment['experiment_categorizer']['value'] == mboI['display_title']
def test_experiment_categorizer_w_target_and_enzyme(testapp, expt_w_target, prot_bio_feature, mboI):
res = testapp.patch_json(expt_w_target['@id'], {'digestion_enzyme': mboI['@id']}).json['@graph'][0]
assert res['digestion_enzyme'] == mboI['@id']
assert res['experiment_categorizer']['field'] == 'Target'
assert res['experiment_categorizer']['value'] == prot_bio_feature['display_title']
def test_experiment_categorizer_w_no_cat1(testapp, experiment_data, exp_types):
del experiment_data['digestion_enzyme']
experiment_data['experiment_type'] = exp_types['rnaseq']['@id']
expt = testapp.post_json('/experiment_seq', experiment_data).json['@graph'][0]
assert expt['experiment_categorizer']['field'] == 'Default'
assert expt['experiment_categorizer'].get('value') is None
def test_experiment_categorizer_cap_c_no_regions(testapp, experiment_data, mboI, exp_types):
experiment_data['experiment_type'] = exp_types['capc']['@id']
expt = testapp.post_json('/experiment_capture_c', experiment_data).json['@graph'][0]
assert expt['experiment_categorizer']['field'] == 'Enzyme'
assert expt['experiment_categorizer']['value'] == mboI['display_title']
def test_experiment_categorizer_cap_c_w_region(expt_w_targ_region, genomic_region_bio_feature):
assert expt_w_targ_region['experiment_categorizer']['field'] == 'Target'
assert expt_w_targ_region['experiment_categorizer']['value'] == genomic_region_bio_feature['display_title']
def test_experiment_categorizer_cap_c_w_2regions(
expt_w_2_targ_regions, genomic_region_bio_feature, gene_bio_feature):
wanted = ', '.join(sorted([genomic_region_bio_feature['display_title'], gene_bio_feature['display_title']]))
assert expt_w_2_targ_regions['experiment_categorizer']['field'] == 'Target'
assert expt_w_2_targ_regions['experiment_categorizer']['value'] == wanted
@pytest.fixture
def new_exp_type(lab, award):
data = {
'uuid': str(uuid4()),
'title': 'Title',
'lab': lab['@id'],
'award': award['@id'],
'status': 'released',
'valid_item_types': ['ExperimentSeq']
}
return data
def test_validate_exp_type_valid(testapp, experiment_data, new_exp_type):
exp_type1 = testapp.post_json('/experiment_type', new_exp_type).json['@graph'][0]
experiment_data['experiment_type'] = exp_type1['@id']
expt = testapp.post_json('/experiment_hi_c', experiment_data, status=422)
testapp.patch_json(exp_type1['@id'], {'valid_item_types': ['ExperimentSeq', 'ExperimentHiC']})
expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
assert expt['experiment_type'] == '/experiment-types/title/'
def test_validate_experiment_set_duplicate_replicate_experiments(testapp, rep_set_data, experiment):
rep_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': experiment['@id']},
{'bio_rep_no': 1, 'tec_rep_no': 2, 'replicate_exp': experiment['@id']}]
repset = testapp.post_json('/experiment_set_replicate', rep_set_data, status=422)
assert repset.json['errors'][0]['name'] == 'ExperimentSet: non-unique exps'
assert 'Duplicate experiment' in repset.json['errors'][0]['description']
'''tzinfo timezone information for Australia/NSW.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class NSW(DstTzInfo):
'''Australia/NSW timezone definition. See datetime.tzinfo for details'''
zone = 'Australia/NSW'
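    # Each entry in _utc_transition_times is the UTC instant at which the zone's
    # offset changes; the element at the same index in _transition_info is the
    # (utcoffset, dst, tzname) triple in effect from that instant onward.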
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1916,12,31,14,1,0),
d(1917,3,24,15,0,0),
d(1941,12,31,16,0,0),
d(1942,3,28,15,0,0),
d(1942,9,26,16,0,0),
d(1943,3,27,15,0,0),
d(1943,10,2,16,0,0),
d(1944,3,25,15,0,0),
d(1971,10,30,16,0,0),
d(1972,2,26,16,0,0),
d(1972,10,28,16,0,0),
d(1973,3,3,16,0,0),
d(1973,10,27,16,0,0),
d(1974,3,2,16,0,0),
d(1974,10,26,16,0,0),
d(1975,3,1,16,0,0),
d(1975,10,25,16,0,0),
d(1976,3,6,16,0,0),
d(1976,10,30,16,0,0),
d(1977,3,5,16,0,0),
d(1977,10,29,16,0,0),
d(1978,3,4,16,0,0),
d(1978,10,28,16,0,0),
d(1979,3,3,16,0,0),
d(1979,10,27,16,0,0),
d(1980,3,1,16,0,0),
d(1980,10,25,16,0,0),
d(1981,2,28,16,0,0),
d(1981,10,24,16,0,0),
d(1982,4,3,16,0,0),
d(1982,10,30,16,0,0),
d(1983,3,5,16,0,0),
d(1983,10,29,16,0,0),
d(1984,3,3,16,0,0),
d(1984,10,27,16,0,0),
d(1985,3,2,16,0,0),
d(1985,10,26,16,0,0),
d(1986,3,15,16,0,0),
d(1986,10,18,16,0,0),
d(1987,3,14,16,0,0),
d(1987,10,24,16,0,0),
d(1988,3,19,16,0,0),
d(1988,10,29,16,0,0),
d(1989,3,18,16,0,0),
d(1989,10,28,16,0,0),
d(1990,3,3,16,0,0),
d(1990,10,27,16,0,0),
d(1991,3,2,16,0,0),
d(1991,10,26,16,0,0),
d(1992,2,29,16,0,0),
d(1992,10,24,16,0,0),
d(1993,3,6,16,0,0),
d(1993,10,30,16,0,0),
d(1994,3,5,16,0,0),
d(1994,10,29,16,0,0),
d(1995,3,4,16,0,0),
d(1995,10,28,16,0,0),
d(1996,3,30,16,0,0),
d(1996,10,26,16,0,0),
d(1997,3,29,16,0,0),
d(1997,10,25,16,0,0),
d(1998,3,28,16,0,0),
d(1998,10,24,16,0,0),
d(1999,3,27,16,0,0),
d(1999,10,30,16,0,0),
d(2000,3,25,16,0,0),
d(2000,8,26,16,0,0),
d(2001,3,24,16,0,0),
d(2001,10,27,16,0,0),
d(2002,3,30,16,0,0),
d(2002,10,26,16,0,0),
d(2003,3,29,16,0,0),
d(2003,10,25,16,0,0),
d(2004,3,27,16,0,0),
d(2004,10,30,16,0,0),
d(2005,3,26,16,0,0),
d(2005,10,29,16,0,0),
d(2006,4,1,16,0,0),
d(2006,10,28,16,0,0),
d(2007,3,24,16,0,0),
d(2007,10,27,16,0,0),
d(2008,3,29,16,0,0),
d(2008,10,25,16,0,0),
d(2009,3,28,16,0,0),
d(2009,10,24,16,0,0),
d(2010,3,27,16,0,0),
d(2010,10,30,16,0,0),
d(2011,3,26,16,0,0),
d(2011,10,29,16,0,0),
d(2012,3,24,16,0,0),
d(2012,10,27,16,0,0),
d(2013,3,30,16,0,0),
d(2013,10,26,16,0,0),
d(2014,3,29,16,0,0),
d(2014,10,25,16,0,0),
d(2015,3,28,16,0,0),
d(2015,10,24,16,0,0),
d(2016,3,26,16,0,0),
d(2016,10,29,16,0,0),
d(2017,3,25,16,0,0),
d(2017,10,28,16,0,0),
d(2018,3,24,16,0,0),
d(2018,10,27,16,0,0),
d(2019,3,30,16,0,0),
d(2019,10,26,16,0,0),
d(2020,3,28,16,0,0),
d(2020,10,24,16,0,0),
d(2021,3,27,16,0,0),
d(2021,10,30,16,0,0),
d(2022,3,26,16,0,0),
d(2022,10,29,16,0,0),
d(2023,3,25,16,0,0),
d(2023,10,28,16,0,0),
d(2024,3,30,16,0,0),
d(2024,10,26,16,0,0),
d(2025,3,29,16,0,0),
d(2025,10,25,16,0,0),
d(2026,3,28,16,0,0),
d(2026,10,24,16,0,0),
d(2027,3,27,16,0,0),
d(2027,10,30,16,0,0),
d(2028,3,25,16,0,0),
d(2028,10,28,16,0,0),
d(2029,3,24,16,0,0),
d(2029,10,27,16,0,0),
d(2030,3,30,16,0,0),
d(2030,10,26,16,0,0),
d(2031,3,29,16,0,0),
d(2031,10,25,16,0,0),
d(2032,3,27,16,0,0),
d(2032,10,30,16,0,0),
d(2033,3,26,16,0,0),
d(2033,10,29,16,0,0),
d(2034,3,25,16,0,0),
d(2034,10,28,16,0,0),
d(2035,3,24,16,0,0),
d(2035,10,27,16,0,0),
d(2036,3,29,16,0,0),
d(2036,10,25,16,0,0),
d(2037,3,28,16,0,0),
d(2037,10,24,16,0,0),
]
_transition_info = [
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
]
NSW = NSW()
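# Usage sketch (illustrative only; relies on the standard pytz ``localize`` API,
# not on anything specific to this generated module):
if __name__ == '__main__':
    from datetime import datetime
    aware = NSW.localize(datetime(2017, 1, 15, 12, 0))
    # Mid-January falls in the DST period, so the offset is UTC+11:00; the
    # abbreviation is 'EST' because this (older) file uses it for both standard
    # and daylight time.
    print(aware.utcoffset(), aware.tzname())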