Merge branch 'feature/global_store' into 'master'

Global Store and Configuration Implementation

See merge request caimira/caimira!466
This commit is contained in:
Luis Aleixo 2023-11-20 11:50:15 +01:00
commit f840a941ae
12 changed files with 931 additions and 319 deletions

View file

@ -28,7 +28,6 @@ import tornado.log
from . import markdown_tools
from . import model_generator
from .report_generator import ReportGenerator, calculate_report_data
from .data_service import DataService
from .user import AuthenticatedUser, AnonymousUser
# The calculator version is based on a combination of the model version and the
@ -38,7 +37,7 @@ from .user import AuthenticatedUser, AnonymousUser
# calculator version. If the calculator needs to make breaking changes (e.g. change
# form attributes) then it can also increase its MAJOR version without needing to
# increase the overall CAiMIRA version (found at ``caimira.__version__``).
__version__ = "4.12.1"
__version__ = "4.13.0"
LOG = logging.getLogger(__name__)
@ -105,17 +104,6 @@ class ConcentrationModel(BaseRequestHandler):
from pprint import pprint
pprint(requested_model_config)
start = datetime.datetime.now()
# Data Service API Integration
fetched_service_data = None
data_service: DataService = self.settings["data_service"]
if self.settings["data_service"]:
try:
fetched_service_data = await data_service.fetch()
except Exception as err:
error_message = f"Something went wrong with the data service: {str(err)}"
LOG.error(error_message, exc_info=True)
self.send_error(500, reason=error_message)
try:
form = model_generator.FormData.from_dict(requested_model_config)
@ -429,15 +417,6 @@ def make_app(
)
template_environment.globals['get_url']=get_root_url
template_environment.globals['get_calculator_url']=get_root_calculator_url
data_service_credentials = {
'data_service_client_email': os.environ.get('DATA_SERVICE_CLIENT_EMAIL', None),
'data_service_client_password': os.environ.get('DATA_SERVICE_CLIENT_PASSWORD', None),
}
data_service = None
data_service_enabled = os.environ.get('DATA_SERVICE_ENABLED', 'False').lower() == 'true'
if data_service_enabled:
data_service = DataService(data_service_credentials)
if debug:
tornado.log.enable_pretty_logging()
@ -456,9 +435,6 @@ def make_app(
arve_client_secret=os.environ.get('ARVE_CLIENT_SECRET', None),
arve_api_key=os.environ.get('ARVE_API_KEY', None),
# Data Service Integration
data_service=data_service,
# Process parallelism controls. There is a balance between serving a single report
# requests quickly or serving multiple requests concurrently.
# The defaults are: handle one report at a time, and allow parallelism

View file

@ -1,74 +0,0 @@
import dataclasses
import json
import logging
import typing
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
LOG = logging.getLogger(__name__)
@dataclasses.dataclass
class DataService():
    '''
    Responsible for establishing a connection to a
    database through a REST API by handling authentication
    and fetching data. It utilizes the Tornado web framework
    for asynchronous HTTP requests.
    '''
    # Credentials used for authentication (expects the keys
    # 'data_service_client_email' and 'data_service_client_password').
    credentials: dict
    # Host URL for the CAiMIRA Data Service API
    host: str = 'https://caimira-data-api.app.cern.ch'
    # Cached access token
    _access_token: typing.Optional[str] = None

    def _is_valid(self, access_token):
        # TODO: decode the token and check its expiry. Until implemented,
        # every call re-authenticates (always reports the token invalid).
        return False

    async def _login(self):
        """
        Return a valid access token, authenticating against the
        ``/login`` endpoint if the cached token is missing or invalid.

        Raises:
            ValueError: If the client credentials are not configured.
        """
        if self._is_valid(self._access_token):
            return self._access_token
        # Invalid (or missing) access token: authenticate again.
        # ``.get`` avoids a KeyError if a credential key is absent entirely.
        client_email = self.credentials.get("data_service_client_email")
        client_password = self.credentials.get("data_service_client_password")
        if client_email is None or client_password is None:
            # Credentials must be provided (e.g. via environment variables).
            raise ValueError("DataService credentials not set")
        http_client = AsyncHTTPClient()
        headers = {'Content-type': 'application/json'}
        json_body = {"email": client_email, "password": client_password}
        response = await http_client.fetch(
            HTTPRequest(
                url=self.host + '/login',
                method='POST',
                headers=headers,
                body=json.dumps(json_body),
            ),
            raise_error=True,
        )
        self._access_token = json.loads(response.body)['access_token']
        return self._access_token

    async def fetch(self):
        """
        Fetch the full data payload from the ``/data`` endpoint,
        logging in first if necessary.
        """
        access_token = await self._login()
        http_client = AsyncHTTPClient()
        headers = {'Authorization': f'Bearer {access_token}'}
        response = await http_client.fetch(
            HTTPRequest(
                url=self.host + '/data',
                method='GET',
                headers=headers,
            ),
            raise_error=True,
        )
        return json.loads(response.body)

View file

@ -1,11 +1,13 @@
import typing
from caimira.store.configuration import config
# ------------------ Default form values ----------------------
# Used to declare when an attribute of a class must have a value provided, and
# there should be no default value used.
NO_DEFAULT = object()
DEFAULT_MC_SAMPLE_SIZE = 250_000
DEFAULT_MC_SAMPLE_SIZE = config.monte_carlo_sample_size
#: The default values for undefined fields. Note that the defaults here
#: and the defaults in the html form must not be contradictory.
@ -79,57 +81,17 @@ DEFAULTS = {
# ------------------ Activities ----------------------
ACTIVITIES: typing.List[typing.Dict[str, typing.Any]] = [
# Mostly silent in the office, but 1/3rd of time speaking.
{'name': 'office', 'activity': 'Seated',
'expiration': {'Speaking': 1, 'Breathing': 2}},
{'name': 'smallmeeting', 'activity': 'Seated',
'expiration': {'Speaking': 1, 'Breathing': None}},
# Each infected person spends 1/3 of time speaking.
{'name': 'largemeeting', 'activity': 'Standing',
'expiration': {'Speaking': 1, 'Breathing': 2}},
{'name': 'callcentre', 'activity': 'Seated', 'expiration': 'Speaking'},
# Daytime control room shift, 50% speaking.
{'name': 'controlroom-day', 'activity': 'Seated',
'expiration': {'Speaking': 1, 'Breathing': 1}},
# Nightshift control room, 10% speaking.
{'name': 'controlroom-night', 'activity': 'Seated',
'expiration': {'Speaking': 1, 'Breathing': 9}},
{'name': 'library', 'activity': 'Seated', 'expiration': 'Breathing'},
# Model 1/2 of time spent speaking in a lab.
{'name': 'lab', 'activity': 'Light activity',
'expiration': {'Speaking': 1, 'Breathing': 1}},
# Model 1/2 of time spent speaking in a workshop.
{'name': 'workshop', 'activity': 'Moderate activity',
'expiration': {'Speaking': 1, 'Breathing': 1}},
{'name': 'training', 'activity': 'Standing', 'expiration': 'Speaking'},
{'name': 'training_attendee', 'activity': 'Seated', 'expiration': 'Breathing'},
{'name': 'gym', 'activity': 'Heavy exercise', 'expiration': 'Breathing'},
{'name': 'household-day', 'activity': 'Light activity',
'expiration': {'Breathing': 5, 'Speaking': 5}},
{'name': 'household-night', 'activity': 'Seated',
'expiration': {'Breathing': 7, 'Speaking': 3}},
{'name': 'primary-school', 'activity': 'Light activity',
'expiration': {'Breathing': 5, 'Speaking': 5}},
{'name': 'secondary-school', 'activity': 'Light activity',
'expiration': {'Breathing': 7, 'Speaking': 3}},
{'name': 'university', 'activity': 'Seated',
'expiration': {'Breathing': 9, 'Speaking': 1}},
{'name': 'restaurant', 'activity': 'Seated',
'expiration': {'Breathing': 1, 'Speaking': 9}},
{'name': 'precise', 'activity': None, 'expiration': None},
]
ACTIVITIES: typing.Dict[str, typing.Dict] = config.population_scenario_activity
# ------------------ Validation ----------------------
ACTIVITY_TYPES = [activity['name'] for activity in ACTIVITIES]
ACTIVITY_TYPES: typing.List[str] = list(ACTIVITIES.keys())
COFFEE_OPTIONS_INT = {'coffee_break_0': 0, 'coffee_break_1': 1,
'coffee_break_2': 2, 'coffee_break_4': 4}
CONFIDENCE_LEVEL_OPTIONS = {'confidence_low': 10,
'confidence_medium': 5, 'confidence_high': 2}
MECHANICAL_VENTILATION_TYPES = {
'mech_type_air_changes', 'mech_type_air_supply', 'not-applicable'}
MASK_TYPES = {'Type I', 'FFP2', 'Cloth'}
MASK_TYPES: typing.List[str] = list(config.mask_distributions.keys())
MASK_WEARING_OPTIONS = {'mask_on', 'mask_off'}
MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June', 'July',
@ -143,8 +105,7 @@ VACCINE_TYPE = ['Ad26.COV2.S_(Janssen)', 'Any_mRNA_-_heterologous', 'AZD1222_(As
'mRNA-1273_(Moderna)', 'Sputnik_V_(Gamaleya)', 'CoronaVac_(Sinovac)_and_BNT162b2_(Pfizer)']
VENTILATION_TYPES = {'natural_ventilation',
'mechanical_ventilation', 'no_ventilation'}
VIRUS_TYPES = {'SARS_CoV_2', 'SARS_CoV_2_ALPHA', 'SARS_CoV_2_BETA',
'SARS_CoV_2_GAMMA', 'SARS_CoV_2_DELTA', 'SARS_CoV_2_OMICRON'}
VIRUS_TYPES: typing.List[str] = list(config.virus_distributions)
VOLUME_TYPES = {'room_volume_explicit', 'room_volume_from_dimensions'}
WINDOWS_OPENING_REGIMES = {'windows_open_permanently',
'windows_open_periodically', 'not-applicable'}

View file

@ -19,6 +19,7 @@ from caimira.monte_carlo.data import expiration_distribution, expiration_BLO_fac
from .defaults import (NO_DEFAULT, DEFAULT_MC_SAMPLE_SIZE, DEFAULTS, ACTIVITIES, ACTIVITY_TYPES, COFFEE_OPTIONS_INT, CONFIDENCE_LEVEL_OPTIONS,
MECHANICAL_VENTILATION_TYPES, MASK_TYPES, MASK_WEARING_OPTIONS, MONTH_NAMES, VACCINE_BOOSTER_TYPE, VACCINE_TYPE,
VENTILATION_TYPES, VIRUS_TYPES, VOLUME_TYPES, WINDOWS_OPENING_REGIMES, WINDOWS_TYPES)
from caimira.store.configuration import config
LOG = logging.getLogger(__name__)
@ -314,10 +315,10 @@ class FormData:
if self.arve_sensors_option == False:
if self.room_heating_option:
humidity = 0.3
humidity = config.room['defaults']['humidity_with_heating']
else:
humidity = 0.5
inside_temp = 293.
humidity = config.room['defaults']['humidity_without_heating']
inside_temp = config.room['defaults']['inside_temp']
else:
humidity = float(self.humidity)
inside_temp = self.inside_temp
@ -373,7 +374,7 @@ class FormData:
if (self.activity_type == 'precise'):
activity_defn, _ = self.generate_precise_activity_expiration()
else:
activity_defn = ACTIVITIES[ACTIVITY_TYPES.index(self.activity_type)]['activity']
activity_defn = ACTIVITIES[self.activity_type]['activity']
population = mc.SimplePopulation(
number=models.IntPiecewiseConstant(transition_times=tuple(transition_times), values=tuple(total_people)),
@ -476,7 +477,8 @@ class FormData:
# This is a minimal, always present source of ventilation, due
# to the air infiltration from the outside.
# See CERN-OPEN-2021-004, p. 12.
infiltration_ventilation = models.AirChange(active=always_on, air_exch=0.25)
residual_vent: float = config.ventilation['infiltration_ventilation'] # type: ignore
infiltration_ventilation = models.AirChange(active=always_on, air_exch=residual_vent)
if self.hepa_option:
hepa = models.HEPAFilter(active=always_on, q_air_mech=self.hepa_amount)
return models.MultipleVentilation((ventilation, hepa, infiltration_ventilation))
@ -511,9 +513,8 @@ class FormData:
# Initializes the virus
virus = virus_distributions[self.virus_type]
activity_index = ACTIVITY_TYPES.index(self.activity_type)
activity_defn = ACTIVITIES[activity_index]['activity']
expiration_defn = ACTIVITIES[activity_index]['expiration']
activity_defn = ACTIVITIES[self.activity_type]['activity']
expiration_defn = ACTIVITIES[self.activity_type]['expiration']
if (self.activity_type == 'smallmeeting'):
# Conversation of N people is approximately 1/N% of the time speaking.
@ -538,29 +539,9 @@ class FormData:
return infected
def exposed_population(self) -> mc.Population:
scenario_activity = {
'office': 'Seated',
'controlroom-day': 'Seated',
'controlroom-night': 'Seated',
'smallmeeting': 'Seated',
'largemeeting': 'Standing',
'callcentre': 'Seated',
'library': 'Seated',
'training': 'Standing',
'training_attendee': 'Seated',
'lab':'Light activity',
'workshop': 'Moderate activity',
'gym':'Heavy exercise',
'household-day': 'Light activity',
'household-night': 'Seated',
'primary-school': 'Light activity',
'secondary-school': 'Light activity',
'university': 'Seated',
'restaurant': 'Seated',
'precise': self.precise_activity['physical_activity'] if self.activity_type == 'precise' else None,
}
activity_defn = scenario_activity[self.activity_type]
activity_defn = (self.precise_activity['physical_activity']
if self.activity_type == 'precise'
else str(config.population_scenario_activity[self.activity_type]['activity']))
activity = activity_distributions[activity_defn]
infected_occupants = self.infected_people

View file

@ -17,6 +17,7 @@ from caimira.apps.calculator import markdown_tools
from ... import monte_carlo as mc
from .model_generator import FormData, DEFAULT_MC_SAMPLE_SIZE
from ... import dataclass_utils
from caimira.store.configuration import config
def model_start_end(model: models.ExposureModel):
@ -201,8 +202,8 @@ def conditional_prob_inf_given_vl_dist(infection_probability: models._Vectorised
for vl_log in viral_loads:
specific_prob = infection_probability[np.where((vl_log-step/2-specific_vl)*(vl_log+step/2-specific_vl)<0)[0]] #type: ignore
pi_means.append(specific_prob.mean())
lower_percentiles.append(np.quantile(specific_prob, 0.05))
upper_percentiles.append(np.quantile(specific_prob, 0.95))
lower_percentiles.append(np.quantile(specific_prob, config.conditional_prob_inf_given_viral_load['lower_percentile']))
upper_percentiles.append(np.quantile(specific_prob, config.conditional_prob_inf_given_viral_load['upper_percentile']))
return pi_means, lower_percentiles, upper_percentiles
@ -210,7 +211,9 @@ def conditional_prob_inf_given_vl_dist(infection_probability: models._Vectorised
def manufacture_conditional_probability_data(exposure_model: models.ExposureModel,
infection_probability: models._VectorisedFloat):
min_vl, max_vl, step = 2, 10, 8/100
min_vl = config.conditional_prob_inf_given_viral_load['min_vl']
max_vl = config.conditional_prob_inf_given_viral_load['max_vl']
step = (max_vl - min_vl)/100
viral_loads = np.arange(min_vl, max_vl, step)
specific_vl = np.log10(exposure_model.concentration_model.virus.viral_load_in_sputum)
pi_means, lower_percentiles, upper_percentiles = conditional_prob_inf_given_vl_dist(infection_probability, viral_loads,

View file

@ -49,6 +49,7 @@ else:
from .utils import method_cache
from .dataclass_utils import nested_replace
from caimira.store.configuration import config
oneoverln2 = 1 / np.log(2)
# Define types for items supporting vectorisation. In the future this may be replaced
@ -347,7 +348,7 @@ class SlidingWindow(WindowOpening):
Average measured value of discharge coefficient for sliding or
side-hung windows.
"""
return 0.6
return config.ventilation['natural']['discharge_factor']['sliding'] # type: ignore
@dataclass(frozen=True)
@ -860,7 +861,7 @@ class _PopulationWithVirus(Population):
The fraction of infectious virus.
"""
return 1.
return config.population_with_virus['fraction_of_infectious_virus'] # type: ignore
def aerosols(self):
"""
@ -1032,7 +1033,7 @@ class _ConcentrationModelBase:
(in the same unit as the concentration). Its the value towards which
the concentration will decay to.
"""
return 0.
return config.concentration_model['min_background_concentration'] # type: ignore
def normalization_factor(self) -> _VectorisedFloat:
"""
@ -1220,7 +1221,7 @@ class ConcentrationModel(_ConcentrationModelBase):
#: evaporation factor: the particles' diameter is multiplied by this
# factor as soon as they are in the air (but AFTER going out of the,
# mask, if any).
evaporation_factor: float = 0.3
evaporation_factor: float = config.particle['evaporation_factor'] # type: ignore
@property
def population(self) -> InfectedPopulation:
@ -1260,10 +1261,10 @@ class CO2ConcentrationModel(_ConcentrationModelBase):
CO2_emitters: SimplePopulation
#: CO2 concentration in the atmosphere (in ppm)
CO2_atmosphere_concentration: float = 440.44
CO2_atmosphere_concentration: float = config.concentration_model['CO2_concentration_model']['CO2_atmosphere_concentration'] # type: ignore
#: CO2 fraction in the exhaled air
CO2_fraction_exhaled: float = 0.042
CO2_fraction_exhaled: float = config.concentration_model['CO2_concentration_model']['CO2_fraction_exhaled'] # type: ignore
@property
def population(self) -> SimplePopulation:
@ -1309,14 +1310,14 @@ class ShortRangeModel:
The dilution factor for the respective expiratory activity type.
'''
# Average mouth opening diameter (m)
mouth_diameter = 0.02
mouth_diameter: float = config.short_range_model['dilution_factor']['mouth_diameter'] # type: ignore
# Breathing rate, from m3/h to m3/s
BR = np.array(self.activity.exhalation_rate/3600.)
# Exhalation coefficient. Ratio between the duration of a breathing cycle and the duration of
# the exhalation.
φ = 2
φ: float = config.short_range_model['dilution_factor']['exhalation_coefficient'] # type: ignore
# Exhalation airflow, as per Jia et al. (2022)
Q_exh: _VectorisedFloat = φ * BR
@ -1328,12 +1329,12 @@ class ShortRangeModel:
u0 = np.array(Q_exh/Am)
# Duration of the expiration period(s), assuming a 4s breath-cycle
tstar = 2.0
tstar: float = config.short_range_model['dilution_factor']['tstar'] # type: ignore
# Streamwise and radial penetration coefficients
𝛽r1 = 0.18
𝛽r2 = 0.2
𝛽x1 = 2.4
𝛽r1: float = config.short_range_model['dilution_factor']['penetration_coefficients']['𝛽r1'] # type: ignore
𝛽r2: float = config.short_range_model['dilution_factor']['penetration_coefficients']['𝛽r2'] # type: ignore
𝛽x1: float = config.short_range_model['dilution_factor']['penetration_coefficients']['𝛽x1'] # type: ignore
# Parameters in the jet-like stage
# Position of virtual origin
@ -1489,7 +1490,7 @@ class ExposureModel:
geographical_data: Cases
#: The number of times the exposure event is repeated (default 1).
repeats: int = 1
repeats: int = config.exposure_model['repeats'] # type: ignore
def __post_init__(self):
"""

View file

@ -6,12 +6,115 @@ from scipy import special as sp
from scipy.stats import weibull_min
import caimira.monte_carlo as mc
from caimira.monte_carlo.sampleable import LogCustom, LogNormal,LogCustomKernel,CustomKernel,Uniform, Custom
from caimira.monte_carlo.sampleable import LogCustom, LogNormal, Normal, LogCustomKernel, CustomKernel, Uniform, Custom
from caimira.store.configuration import config
sqrt2pi = np.sqrt(2.*np.pi)
sqrt2 = np.sqrt(2.)
def custom_distribution_lookup(dict: dict, key_part: str) -> typing.Any:
    """
    Look up a custom distribution based on a partial key.

    Args:
        dict (dict): The root to search.
        key_part (str): The distribution key to match.

    Returns:
        Any: The associated distribution of the first key containing
        ``key_part``, or an error message string when no key matches
        (or when the matching entry lacks 'associated_distribution').
    """
    # NOTE: the parameter shadows the builtin ``dict``; the name is kept
    # unchanged for backward compatibility with existing callers.
    for key, value in dict.items():
        if key_part in key:
            try:
                return value['associated_distribution']
            except KeyError:
                # Matching entry is malformed (no associated distribution).
                return f"Key '{key_part}' not found."
    # Fix: the original returned the message from an ``except KeyError``
    # around the loop, which never fires when no key matches (iterating a
    # dict raises no KeyError), so a miss silently returned None. Return
    # the documented message explicitly instead.
    return f"Key '{key_part}' not found."
def evaluate_reference(reference_variable: str) -> typing.Any:
    """
    Evaluate a reference variable.

    Args:
        reference_variable (str): The variable to evaluate.

    Returns:
        Any: The evaluated value or an error message if the variable is not defined.
    """
    # SECURITY NOTE(review): ``eval`` executes arbitrary Python expressions.
    # This is only acceptable if ``reference_variable`` always comes from
    # trusted configuration ("Ref: ..." entries), never from user input —
    # confirm against every caller of param_evaluation.
    try:
        # Resolves the name in this module's namespace (e.g. a module-level
        # distribution defined in this file).
        return eval(reference_variable)
    except NameError:
        # Undefined name: return a message rather than raising, matching the
        # error-string convention used by custom_distribution_lookup.
        return f"Variable '{reference_variable}' is not defined."
def evaluate_custom_distribution(dist: str, params: typing.Dict) -> typing.Any:
    """
    Evaluate a custom distribution.

    Args:
        dist (str): The type of distribution.
        params (Dict): The parameters for the distribution.

    Returns:
        Any: The generated distribution.

    Raises:
        ValueError: If the distribution type is not recognized.
    """
    # Dispatch table mapping each configuration label to a factory that
    # builds the corresponding value from the supplied parameters.
    factories: typing.Dict[str, typing.Callable[[typing.Dict], typing.Any]] = {
        'Numpy Linear Space (linspace)':
            lambda p: np.linspace(p['start'], p['stop'], p['num']),
        'Numpy Normal Distribution (random.normal)':
            lambda p: Normal(p['mean_gaussian'], p['standard_deviation_gaussian']),
        'Numpy Log-normal Distribution (random.lognormal)':
            lambda p: LogNormal(p['mean_gaussian'], p['standard_deviation_gaussian']),
        'Numpy Uniform Distribution (random.uniform)':
            lambda p: Uniform(p['low'], p['high']),
    }
    if dist not in factories:
        raise ValueError('Bad request - distribution not found.')
    return factories[dist](params)
def param_evaluation(root: typing.Dict, param: typing.Union[str, typing.Any]) -> typing.Any:
    """
    Evaluate a parameter from a nested dictionary.

    Args:
        root (dict): The root dictionary.
        param (Union[str, Any]): The parameter to evaluate.

    Returns:
        Any: The evaluated value.

    Raises:
        TypeError: If the type of the parameter is not defined.
    """
    value = root.get(param)
    if isinstance(value, str):
        if value.startswith('Ref:'):
            # "Ref: <comment> - <variable>" points at a module-level name.
            reference_variable = value.split(' - ')[1].strip()
            return evaluate_reference(reference_variable)
        elif value == 'Custom':
            # Custom distributions are declared in a sibling entry of root.
            custom_distribution: typing.Dict = custom_distribution_lookup(
                root, 'custom distribution')
            # Only the first declared distribution is evaluated.
            for d, p in custom_distribution.items():
                return evaluate_custom_distribution(d, p)
        else:
            # Fix: an unrecognized string previously fell through and
            # silently returned None; fail explicitly, as documented.
            raise TypeError('Bad request - type not allowed.')
    elif isinstance(value, dict):
        dist: str = root[param]['associated_distribution']
        params: typing.Dict = root[param]['parameters']
        return evaluate_custom_distribution(dist, params)
    elif isinstance(value, (float, int)):
        # Plain scalars (note: bool is an int subclass and passes here,
        # matching the original behavior).
        return value
    else:
        raise TypeError('Bad request - type not allowed.')
@dataclass(frozen=True)
class BLOmodel:
"""
@ -34,25 +137,37 @@ class BLOmodel:
#: cn (cm^-3) for resp. the B, L and O modes. Corresponds to the
# total concentration of aerosols for each mode.
cn: typing.Tuple[float, float, float] = (0.06, 0.2, 0.0010008)
cn: typing.Tuple[float, float, float] = (
config.BLOmodel['cn']['B'],
config.BLOmodel['cn']['L'],
config.BLOmodel['cn']['O']
)
# Mean of the underlying normal distributions (represents the log of a
# diameter in microns), for resp. the B, L and O modes.
mu: typing.Tuple[float, float, float] = (0.989541, 1.38629, 4.97673)
mu: typing.Tuple[float, float, float] = (
config.BLOmodel['mu']['B'],
config.BLOmodel['mu']['L'],
config.BLOmodel['mu']['O']
)
# Std deviation of the underlying normal distribution, for resp.
# the B, L and O modes.
sigma: typing.Tuple[float, float, float] = (0.262364, 0.506818, 0.585005)
sigma: typing.Tuple[float, float, float] = (
config.BLOmodel['sigma']['B'],
config.BLOmodel['sigma']['L'],
config.BLOmodel['sigma']['O']
)
def distribution(self, d):
"""
Returns the raw value of the probability distribution for a
given diameter d (microns).
"""
return sum( (1 / d) * (A * cn / (sqrt2pi * sigma)) *
np.exp(-(np.log(d) - mu) ** 2 / (2 * sigma ** 2))
for A,cn,mu,sigma in zip(self.BLO_factors, self.cn,
self.mu, self.sigma) )
return sum((1 / d) * (A * cn / (sqrt2pi * sigma)) *
np.exp(-(np.log(d) - mu) ** 2 / (2 * sigma ** 2))
for A, cn, mu, sigma in zip(self.BLO_factors, self.cn,
self.mu, self.sigma))
def integrate(self, dmin, dmax):
"""
@ -60,7 +175,7 @@ class BLOmodel:
probability distribution.
"""
result = 0.
for A,cn,mu,sigma in zip(self.BLO_factors, self.cn, self.mu, self.sigma):
for A, cn, mu, sigma in zip(self.BLO_factors, self.cn, self.mu, self.sigma):
ymin = (np.log(dmin)-mu)/(sqrt2*sigma)
ymax = (np.log(dmax)-mu)/(sqrt2*sigma)
result += A * cn * (sp.erf(ymax)-sp.erf(ymin)) / 2.
@ -69,35 +184,55 @@ class BLOmodel:
# From https://doi.org/10.1101/2021.10.14.21264988 and references therein
activity_distributions = {
'Seated': mc.Activity(LogNormal(-0.6872121723362303, 0.10498338229297108),
LogNormal(-0.6872121723362303, 0.10498338229297108)),
'Seated': mc.Activity(
inhalation_rate=param_evaluation(
config.activity_distributions['Seated'], 'inhalation_rate'),
exhalation_rate=param_evaluation(
config.activity_distributions['Seated'], 'exhalation_rate'),
),
'Standing': mc.Activity(LogNormal(-0.5742377578494785, 0.09373162411398223),
LogNormal(-0.5742377578494785, 0.09373162411398223)),
'Standing': mc.Activity(
inhalation_rate=param_evaluation(
config.activity_distributions['Standing'], 'inhalation_rate'),
exhalation_rate=param_evaluation(
config.activity_distributions['Standing'], 'exhalation_rate'),
),
'Light activity': mc.Activity(LogNormal(0.21380242785625422,0.09435378091059601),
LogNormal(0.21380242785625422,0.09435378091059601)),
'Light activity': mc.Activity(
inhalation_rate=param_evaluation(
config.activity_distributions['Light activity'], 'inhalation_rate'),
exhalation_rate=param_evaluation(
config.activity_distributions['Light activity'], 'exhalation_rate'),
),
'Moderate activity': mc.Activity(LogNormal(0.551771330362601, 0.1894616357138137),
LogNormal(0.551771330362601, 0.1894616357138137)),
'Moderate activity': mc.Activity(
inhalation_rate=param_evaluation(
config.activity_distributions['Moderate activity'], 'inhalation_rate'),
exhalation_rate=param_evaluation(
config.activity_distributions['Moderate activity'], 'exhalation_rate'),
),
'Heavy exercise': mc.Activity(LogNormal(1.1644665696723049, 0.21744554768657565),
LogNormal(1.1644665696723049, 0.21744554768657565)),
'Heavy exercise': mc.Activity(
inhalation_rate=param_evaluation(
config.activity_distributions['Heavy exercise'], 'inhalation_rate'),
exhalation_rate=param_evaluation(
config.activity_distributions['Heavy exercise'], 'exhalation_rate'),
),
}
# From https://doi.org/10.1101/2021.10.14.21264988 and references therein
symptomatic_vl_frequencies = LogCustomKernel(
np.array((2.46032, 2.67431, 2.85434, 3.06155, 3.25856, 3.47256, 3.66957, 3.85979, 4.09927, 4.27081,
4.47631, 4.66653, 4.87204, 5.10302, 5.27456, 5.46478, 5.6533, 5.88428, 6.07281, 6.30549,
6.48552, 6.64856, 6.85407, 7.10373, 7.30075, 7.47229, 7.66081, 7.85782, 8.05653, 8.27053,
8.48453, 8.65607, 8.90573, 9.06878, 9.27429, 9.473, 9.66152, 9.87552)),
4.47631, 4.66653, 4.87204, 5.10302, 5.27456, 5.46478, 5.6533, 5.88428, 6.07281, 6.30549,
6.48552, 6.64856, 6.85407, 7.10373, 7.30075, 7.47229, 7.66081, 7.85782, 8.05653, 8.27053,
8.48453, 8.65607, 8.90573, 9.06878, 9.27429, 9.473, 9.66152, 9.87552)),
np.array((0.001206885, 0.007851618, 0.008078144, 0.01502491, 0.013258014, 0.018528495, 0.020053765,
0.021896167, 0.022047184, 0.018604005, 0.01547796, 0.018075445, 0.021503523, 0.022349217,
0.025097721, 0.032875078, 0.030594727, 0.032573045, 0.034717482, 0.034792991,
0.033267721, 0.042887485, 0.036846816, 0.03876473, 0.045016819, 0.040063473, 0.04883754,
0.043944602, 0.048142864, 0.041588741, 0.048762031, 0.027921732, 0.033871788,
0.022122693, 0.016927718, 0.008833228, 0.00478598, 0.002807662)),
0.021896167, 0.022047184, 0.018604005, 0.01547796, 0.018075445, 0.021503523, 0.022349217,
0.025097721, 0.032875078, 0.030594727, 0.032573045, 0.034717482, 0.034792991,
0.033267721, 0.042887485, 0.036846816, 0.03876473, 0.045016819, 0.040063473, 0.04883754,
0.043944602, 0.048142864, 0.041588741, 0.048762031, 0.027921732, 0.033871788,
0.022122693, 0.016927718, 0.008833228, 0.00478598, 0.002807662)),
kernel_bandwidth=0.1
)
@ -105,61 +240,103 @@ symptomatic_vl_frequencies = LogCustomKernel(
# Weibull distribution with a shape factor of 3.47 and a scale factor of 7.01.
# From https://elifesciences.org/articles/65774 and first line of the figure in
# https://iiif.elifesciences.org/lax:65774%2Felife-65774-fig4-figsupp3-v2.tif/full/1500,/0/default.jpg
viral_load = np.linspace(weibull_min.ppf(0.01, c=3.47, scale=7.01),
weibull_min.ppf(0.99, c=3.47, scale=7.01), 30)
frequencies_pdf = weibull_min.pdf(viral_load, c=3.47, scale=7.01)
covid_overal_vl_data = LogCustom(bounds=(2, 10),
function=lambda d: np.interp(d, viral_load, frequencies_pdf, left=0., right=0.),
max_function=0.2)
viral_load = np.linspace(
weibull_min.ppf(
config.covid_overal_vl_data['start'],
c=config.covid_overal_vl_data['shape_factor'],
scale=config.covid_overal_vl_data['scale_factor']
),
weibull_min.ppf(
config.covid_overal_vl_data['stop'],
c=config.covid_overal_vl_data['shape_factor'],
scale=config.covid_overal_vl_data['scale_factor']
),
int(config.covid_overal_vl_data['num'])
)
frequencies_pdf = weibull_min.pdf(
viral_load,
c=config.covid_overal_vl_data['shape_factor'],
scale=config.covid_overal_vl_data['scale_factor']
)
covid_overal_vl_data = LogCustom(bounds=(config.covid_overal_vl_data['min_bound'], config.covid_overal_vl_data['max_bound']),
function=lambda d: np.interp(d, viral_load, frequencies_pdf, config.covid_overal_vl_data[
'interpolation_fp_left'], config.covid_overal_vl_data['interpolation_fp_right']),
max_function=config.covid_overal_vl_data['max_function'])
# Derived from data in doi.org/10.1016/j.ijid.2020.09.025 and
# https://iosh.com/media/8432/aerosol-infection-risk-hospital-patient-care-full-report.pdf (page 60)
viable_to_RNA_ratio_distribution = Uniform(0.01, 0.6)
viable_to_RNA_ratio_distribution = Uniform(
config.viable_to_RNA_ratio_distribution['low'], config.viable_to_RNA_ratio_distribution['high'])
# From discussion with virologists
infectious_dose_distribution = Uniform(10., 100.)
infectious_dose_distribution = Uniform(
config.infectious_dose_distribution['low'], config.infectious_dose_distribution['high'])
# From https://doi.org/10.1101/2021.10.14.21264988 and refererences therein
virus_distributions = {
'SARS_CoV_2': mc.SARSCoV2(
viral_load_in_sputum=covid_overal_vl_data,
infectious_dose=infectious_dose_distribution,
viable_to_RNA_ratio=viable_to_RNA_ratio_distribution,
transmissibility_factor=1.,
),
viral_load_in_sputum=param_evaluation(
config.virus_distributions['SARS_CoV_2'], 'viral_load_in_sputum'),
infectious_dose=param_evaluation(
config.virus_distributions['SARS_CoV_2'], 'infectious_dose'),
viable_to_RNA_ratio=param_evaluation(
config.virus_distributions['SARS_CoV_2'], 'viable_to_RNA_ratio'),
transmissibility_factor=param_evaluation(
config.virus_distributions['SARS_CoV_2'], 'transmissibility_factor'),
),
'SARS_CoV_2_ALPHA': mc.SARSCoV2(
viral_load_in_sputum=covid_overal_vl_data,
infectious_dose=infectious_dose_distribution,
viable_to_RNA_ratio=viable_to_RNA_ratio_distribution,
transmissibility_factor=0.78,
),
viral_load_in_sputum=param_evaluation(
config.virus_distributions['SARS_CoV_2_ALPHA'], 'viral_load_in_sputum'),
infectious_dose=param_evaluation(
config.virus_distributions['SARS_CoV_2_ALPHA'], 'infectious_dose'),
viable_to_RNA_ratio=param_evaluation(
config.virus_distributions['SARS_CoV_2_ALPHA'], 'viable_to_RNA_ratio'),
transmissibility_factor=param_evaluation(
config.virus_distributions['SARS_CoV_2_ALPHA'], 'transmissibility_factor'),
),
'SARS_CoV_2_BETA': mc.SARSCoV2(
viral_load_in_sputum=covid_overal_vl_data,
infectious_dose=infectious_dose_distribution,
viable_to_RNA_ratio=viable_to_RNA_ratio_distribution,
transmissibility_factor=0.8,
),
viral_load_in_sputum=param_evaluation(
config.virus_distributions['SARS_CoV_2_BETA'], 'viral_load_in_sputum'),
infectious_dose=param_evaluation(
config.virus_distributions['SARS_CoV_2_BETA'], 'infectious_dose'),
viable_to_RNA_ratio=param_evaluation(
config.virus_distributions['SARS_CoV_2_BETA'], 'viable_to_RNA_ratio'),
transmissibility_factor=param_evaluation(
config.virus_distributions['SARS_CoV_2_BETA'], 'transmissibility_factor'),
),
'SARS_CoV_2_GAMMA': mc.SARSCoV2(
viral_load_in_sputum=covid_overal_vl_data,
infectious_dose=infectious_dose_distribution,
viable_to_RNA_ratio=viable_to_RNA_ratio_distribution,
transmissibility_factor=0.72,
),
viral_load_in_sputum=param_evaluation(
config.virus_distributions['SARS_CoV_2_GAMMA'], 'viral_load_in_sputum'),
infectious_dose=param_evaluation(
config.virus_distributions['SARS_CoV_2_GAMMA'], 'infectious_dose'),
viable_to_RNA_ratio=param_evaluation(
config.virus_distributions['SARS_CoV_2_GAMMA'], 'viable_to_RNA_ratio'),
transmissibility_factor=param_evaluation(
config.virus_distributions['SARS_CoV_2_GAMMA'], 'transmissibility_factor'),
),
'SARS_CoV_2_DELTA': mc.SARSCoV2(
viral_load_in_sputum=covid_overal_vl_data,
infectious_dose=infectious_dose_distribution,
viable_to_RNA_ratio=viable_to_RNA_ratio_distribution,
transmissibility_factor=0.51,
),
viral_load_in_sputum=param_evaluation(
config.virus_distributions['SARS_CoV_2_DELTA'], 'viral_load_in_sputum'),
infectious_dose=param_evaluation(
config.virus_distributions['SARS_CoV_2_DELTA'], 'infectious_dose'),
viable_to_RNA_ratio=param_evaluation(
config.virus_distributions['SARS_CoV_2_DELTA'], 'viable_to_RNA_ratio'),
transmissibility_factor=param_evaluation(
config.virus_distributions['SARS_CoV_2_DELTA'], 'transmissibility_factor'),
),
'SARS_CoV_2_OMICRON': mc.SARSCoV2(
viral_load_in_sputum=covid_overal_vl_data,
infectious_dose=infectious_dose_distribution,
viable_to_RNA_ratio=viable_to_RNA_ratio_distribution,
transmissibility_factor=0.2,
),
viral_load_in_sputum=param_evaluation(
config.virus_distributions['SARS_CoV_2_OMICRON'], 'viral_load_in_sputum'),
infectious_dose=param_evaluation(
config.virus_distributions['SARS_CoV_2_OMICRON'], 'infectious_dose'),
viable_to_RNA_ratio=param_evaluation(
config.virus_distributions['SARS_CoV_2_OMICRON'], 'viable_to_RNA_ratio'),
transmissibility_factor=param_evaluation(
config.virus_distributions['SARS_CoV_2_OMICRON'], 'transmissibility_factor'),
),
}
@ -169,14 +346,33 @@ virus_distributions = {
# https://doi.org/10.4209/aaqr.2020.08.0531
# https://doi.org/10.1080/02786826.2021.1890687
mask_distributions = {
'Type I': mc.Mask(η_inhale=Uniform(0.25, 0.80)),
'FFP2': mc.Mask(η_inhale=Uniform(0.83, 0.91)),
'Cloth': mc.Mask(η_inhale=Uniform(0.05, 0.40), η_exhale=Uniform(0.20, 0.50)),
'Type I': mc.Mask(
η_inhale=param_evaluation(
config.mask_distributions['Type I'], 'η_inhale'),
η_exhale=param_evaluation(
config.mask_distributions['Type I'], 'η_exhale')
if config.mask_distributions['Type I']['Known filtration efficiency of masks when exhaling?'] == 'Yes' else None,
),
'FFP2': mc.Mask(
η_inhale=param_evaluation(
config.mask_distributions['FFP2'], 'η_inhale'),
η_exhale=param_evaluation(
config.mask_distributions['FFP2'], 'η_exhale')
if config.mask_distributions['FFP2']['Known filtration efficiency of masks when exhaling?'] == 'Yes' else None,
),
'Cloth': mc.Mask(
η_inhale=param_evaluation(
config.mask_distributions['Cloth'], 'η_inhale'),
η_exhale=param_evaluation(
config.mask_distributions['Cloth'], 'η_exhale')
if config.mask_distributions['Cloth']['Known filtration efficiency of masks when exhaling?'] == 'Yes' else None,
),
}
def expiration_distribution(
BLO_factors,
d_min=0.1,
d_max=30.,
) -> mc.Expiration:
"""
@ -187,40 +383,67 @@ def expiration_distribution(
an historical choice based on previous implementations of the model
(it limits the influence of the O-mode).
"""
dscan = np.linspace(0.1, d_max, 3000)
dscan = np.linspace(d_min, d_max, 3000)
return mc.Expiration(
CustomKernel(
dscan,
BLOmodel(BLO_factors).distribution(dscan),
kernel_bandwidth=0.1,
),
cn=BLOmodel(BLO_factors).integrate(0.1, d_max),
cn=BLOmodel(BLO_factors).integrate(d_min, d_max),
)
expiration_BLO_factors = {
'Breathing': (1., 0., 0.),
'Speaking': (1., 1., 1.),
'Singing': (1., 5., 5.),
'Shouting': (1., 5., 5.),
'Breathing': (
param_evaluation(config.expiration_BLO_factors['Breathing'], 'B'),
param_evaluation(config.expiration_BLO_factors['Breathing'], 'L'),
param_evaluation(config.expiration_BLO_factors['Breathing'], 'O')
),
'Speaking': (
param_evaluation(config.expiration_BLO_factors['Speaking'], 'B'),
param_evaluation(config.expiration_BLO_factors['Speaking'], 'L'),
param_evaluation(config.expiration_BLO_factors['Speaking'], 'O')
),
'Singing': (
param_evaluation(config.expiration_BLO_factors['Singing'], 'B'),
param_evaluation(config.expiration_BLO_factors['Singing'], 'L'),
param_evaluation(config.expiration_BLO_factors['Singing'], 'O')
),
'Shouting': (
param_evaluation(config.expiration_BLO_factors['Shouting'], 'B'),
param_evaluation(config.expiration_BLO_factors['Shouting'], 'L'),
param_evaluation(config.expiration_BLO_factors['Shouting'], 'O')
),
}
expiration_distributions = {
exp_type: expiration_distribution(BLO_factors)
exp_type: expiration_distribution(BLO_factors,
d_min=param_evaluation(
config.long_range_expiration_distributions, 'minimum_diameter'),
d_max=param_evaluation(config.long_range_expiration_distributions, 'maximum_diameter'))
for exp_type, BLO_factors in expiration_BLO_factors.items()
}
short_range_expiration_distributions = {
exp_type: expiration_distribution(BLO_factors, d_max=100)
exp_type: expiration_distribution(
BLO_factors,
d_min=param_evaluation(
config.short_range_expiration_distributions, 'minimum_diameter'),
d_max=param_evaluation(config.short_range_expiration_distributions, 'maximum_diameter'))
for exp_type, BLO_factors in expiration_BLO_factors.items()
}
# Derived from Fig 8 a) "stand-stand" in https://www.mdpi.com/1660-4601/17/4/1445/htm
distances = np.array((0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2))
frequencies = np.array((0.0598036,0.0946154,0.1299152,0.1064905,0.1099066,0.0998209, 0.0845298,0.0479286,0.0406084,0.039795,0.0205997,0.0152316,0.0118155,0.0118155,0.018485,0.0205997))
short_range_distances = Custom(bounds=(0.5,2.),
function=lambda x: np.interp(x,distances,frequencies,left=0.,right=0.),
max_function=0.13)
distances = np.array((0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2,
1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2))
frequencies = np.array((0.0598036, 0.0946154, 0.1299152, 0.1064905, 0.1099066, 0.0998209, 0.0845298,
0.0479286, 0.0406084, 0.039795, 0.0205997, 0.0152316, 0.0118155, 0.0118155, 0.018485, 0.0205997))
short_range_distances = Custom(bounds=(param_evaluation(config.short_range_distances, 'minimum_distance'),
param_evaluation(config.short_range_distances, 'maximum_distance')),
function=lambda x: np.interp(
x, distances, frequencies, left=0., right=0.),
max_function=0.13)

View file

View file

@ -0,0 +1,451 @@
class Configuration:
    """Configuration singleton to cache data values.

    Each class attribute below holds the built-in default for one model
    parameter or distribution description.  The :meth:`update` method allows
    an external data service to overwrite these defaults at runtime; the
    module-level ``config`` instance is the shared access point.
    """

    # Parameters of the BLO (Bronchiolar / Laryngeal / Oral) particle-emission
    # model: particle concentrations (cn) and log-normal mode parameters
    # (mu, sigma) for each of the three modes.
    BLOmodel = {
        "cn": {
            "B": 0.06,
            "L": 0.2,
            "O": 0.0010008,
        },
        "mu": {
            "B": 0.989541,
            "L": 1.38629,
            "O": 4.97673,
        },
        "sigma": {
            "B": 0.262364,
            "L": 0.506818,
            "O": 0.585005,
        },
    }

    # Inhalation/exhalation rate distributions per physical-activity level.
    # Parameters are expressed in the underlying gaussian space of the
    # log-normal distribution.
    activity_distributions = {
        "Seated": {
            "inhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": -0.6872121723362303,
                    "standard_deviation_gaussian": 0.10498338229297108,
                },
            },
            "exhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": -0.6872121723362303,
                    "standard_deviation_gaussian": 0.10498338229297108,
                },
            },
        },
        "Standing": {
            "inhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": -0.5742377578494785,
                    "standard_deviation_gaussian": 0.09373162411398223,
                },
            },
            "exhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": -0.5742377578494785,
                    "standard_deviation_gaussian": 0.09373162411398223,
                },
            },
        },
        "Light activity": {
            "inhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": 0.21380242785625422,
                    "standard_deviation_gaussian": 0.09435378091059601,
                },
            },
            "exhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": 0.21380242785625422,
                    "standard_deviation_gaussian": 0.09435378091059601,
                },
            },
        },
        "Moderate activity": {
            "inhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": 0.551771330362601,
                    "standard_deviation_gaussian": 0.1894616357138137,
                },
            },
            "exhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": 0.551771330362601,
                    "standard_deviation_gaussian": 0.1894616357138137,
                },
            },
        },
        "Heavy exercise": {
            "inhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": 1.1644665696723049,
                    "standard_deviation_gaussian": 0.21744554768657565,
                },
            },
            "exhalation_rate": {
                "associated_distribution": "Numpy Log-normal Distribution (random.lognormal)",
                "parameters": {
                    "mean_gaussian": 1.1644665696723049,
                    "standard_deviation_gaussian": 0.21744554768657565,
                },
            },
        },
    }

    # Empirical frequency table of symptomatic viral loads (log10 scale),
    # used to build a kernel-density estimate with the given bandwidth.
    symptomatic_vl_frequencies = {
        "log_variable": [
            2.46032,
            2.67431,
            2.85434,
            3.06155,
            3.25856,
            3.47256,
            3.66957,
            3.85979,
            4.09927,
            4.27081,
            4.47631,
            4.66653,
            4.87204,
            5.10302,
            5.27456,
            5.46478,
            5.6533,
            5.88428,
            6.07281,
            6.30549,
            6.48552,
            6.64856,
            6.85407,
            7.10373,
            7.30075,
            7.47229,
            7.66081,
            7.85782,
            8.05653,
            8.27053,
            8.48453,
            8.65607,
            8.90573,
            9.06878,
            9.27429,
            9.473,
            9.66152,
            9.87552,
        ],
        "frequencies": [
            0.001206885,
            0.007851618,
            0.008078144,
            0.01502491,
            0.013258014,
            0.018528495,
            0.020053765,
            0.021896167,
            0.022047184,
            0.018604005,
            0.01547796,
            0.018075445,
            0.021503523,
            0.022349217,
            0.025097721,
            0.032875078,
            0.030594727,
            0.032573045,
            0.034717482,
            0.034792991,
            0.033267721,
            0.042887485,
            0.036846816,
            0.03876473,
            0.045016819,
            0.040063473,
            0.04883754,
            0.043944602,
            0.048142864,
            0.041588741,
            0.048762031,
            0.027921732,
            0.033871788,
            0.022122693,
            0.016927718,
            0.008833228,
            0.00478598,
            0.002807662,
        ],
        "kernel_bandwidth": 0.1,
    }

    # Parameters describing the overall COVID viral-load distribution
    # (Weibull shape/scale plus interpolation/sampling bounds).
    covid_overal_vl_data = {
        "shape_factor": 3.47,
        "scale_factor": 7.01,
        "start": 0.01,
        "stop": 0.99,
        "num": 30.0,
        "min_bound": 2,
        "max_bound": 10,
        "interpolation_fp_left": 0,
        "interpolation_fp_right": 0,
        "max_function": 0.2,
    }

    # Uniform-distribution bounds for the viable-virion to RNA-copy ratio.
    viable_to_RNA_ratio_distribution = {
        "low": 0.01,
        "high": 0.6,
    }

    # Uniform-distribution bounds for the infectious dose (virions).
    infectious_dose_distribution = {
        "low": 10,
        "high": 100,
    }

    # Per-variant virus parameters.  The "Ref: ..." strings indicate that the
    # value is taken from the referenced shared distribution above; only the
    # transmissibility factor differs between variants.
    virus_distributions = {
        "SARS_CoV_2": {
            "viral_load_in_sputum": "Ref: Viral load - covid_overal_vl_data",
            "infectious_dose": "Ref: Infectious dose - infectious_dose_distribution",
            "viable_to_RNA_ratio": "Ref: Viable to RNA ratio - viable_to_RNA_ratio_distribution",
            "transmissibility_factor": 1,
            "infectiousness_days": 14,
        },
        "SARS_CoV_2_ALPHA": {
            "viral_load_in_sputum": "Ref: Viral load - covid_overal_vl_data",
            "infectious_dose": "Ref: Infectious dose - infectious_dose_distribution",
            "viable_to_RNA_ratio": "Ref: Viable to RNA ratio - viable_to_RNA_ratio_distribution",
            "transmissibility_factor": 0.78,
            "infectiousness_days": 14,
        },
        "SARS_CoV_2_BETA": {
            "viral_load_in_sputum": "Ref: Viral load - covid_overal_vl_data",
            "infectious_dose": "Ref: Infectious dose - infectious_dose_distribution",
            "viable_to_RNA_ratio": "Ref: Viable to RNA ratio - viable_to_RNA_ratio_distribution",
            "transmissibility_factor": 0.8,
            "infectiousness_days": 14,
        },
        "SARS_CoV_2_GAMMA": {
            "viral_load_in_sputum": "Ref: Viral load - covid_overal_vl_data",
            "infectious_dose": "Ref: Infectious dose - infectious_dose_distribution",
            "viable_to_RNA_ratio": "Ref: Viable to RNA ratio - viable_to_RNA_ratio_distribution",
            "transmissibility_factor": 0.72,
            "infectiousness_days": 14,
        },
        "SARS_CoV_2_DELTA": {
            "viral_load_in_sputum": "Ref: Viral load - covid_overal_vl_data",
            "infectious_dose": "Ref: Infectious dose - infectious_dose_distribution",
            "viable_to_RNA_ratio": "Ref: Viable to RNA ratio - viable_to_RNA_ratio_distribution",
            "transmissibility_factor": 0.51,
            "infectiousness_days": 14,
        },
        "SARS_CoV_2_OMICRON": {
            "viral_load_in_sputum": "Ref: Viral load - covid_overal_vl_data",
            "infectious_dose": "Ref: Infectious dose - infectious_dose_distribution",
            "viable_to_RNA_ratio": "Ref: Viable to RNA ratio - viable_to_RNA_ratio_distribution",
            "transmissibility_factor": 0.2,
            "infectiousness_days": 14,
        },
        "SARS_CoV_2_Other": {
            "viral_load_in_sputum": "Ref: Viral load - covid_overal_vl_data",
            "infectious_dose": "Ref: Infectious dose - infectious_dose_distribution",
            "viable_to_RNA_ratio": "Ref: Viable to RNA ratio - viable_to_RNA_ratio_distribution",
            "transmissibility_factor": 0.1,
            "infectiousness_days": 14,
        },
    }

    # Mask filtration-efficiency distributions.  η_inhale is always defined;
    # η_exhale is only present when the "Known filtration efficiency of masks
    # when exhaling?" flag is "Yes" (consumers check that flag before reading).
    mask_distributions = {
        "Type I": {
            "η_inhale": {
                "associated_distribution": "Numpy Uniform Distribution (random.uniform)",
                "parameters": {
                    "low": 0.25,
                    "high": 0.80,
                },
            },
            "Known filtration efficiency of masks when exhaling?": "No",
            "factor_exhale": 1,
        },
        "FFP2": {
            "η_inhale": {
                "associated_distribution": "Numpy Uniform Distribution (random.uniform)",
                "parameters": {
                    "low": 0.83,
                    "high": 0.91,
                },
            },
            "Known filtration efficiency of masks when exhaling?": "No",
            "factor_exhale": 1,
        },
        "Cloth": {
            "η_inhale": {
                "associated_distribution": "Numpy Uniform Distribution (random.uniform)",
                "parameters": {
                    "low": 0.05,
                    "high": 0.40,
                },
            },
            "Known filtration efficiency of masks when exhaling?": "Yes",
            "η_exhale": {
                "associated_distribution": "Numpy Uniform Distribution (random.uniform)",
                "parameters": {
                    "low": 0.20,
                    "high": 0.50,
                },
            },
            "factor_exhale": 1,
        },
    }

    # Relative weights of the B/L/O emission modes per expiration type.
    expiration_BLO_factors = {
        "Breathing": {
            "B": 1.0,
            "L": 0.0,
            "O": 0.0,
        },
        "Speaking": {
            "B": 1.0,
            "L": 1.0,
            "O": 1.0,
        },
        "Singing": {
            "B": 1.0,
            "L": 5.0,
            "O": 5.0,
        },
        "Shouting": {
            "B": 1.0,
            "L": 5.0,
            "O": 5.0,
        },
    }

    # Particle-diameter bounds (μm) for the long-range expiration model.
    long_range_expiration_distributions = {
        "minimum_diameter": 0.1,
        "maximum_diameter": 30,
    }

    # Particle-diameter bounds (μm) for the short-range expiration model.
    short_range_expiration_distributions = {
        "minimum_diameter": 0.1,
        "maximum_diameter": 100,
    }

    # Interpersonal-distance bounds (m) for the short-range distance model.
    short_range_distances = {
        "minimum_distance": 0.5,
        "maximum_distance": 2.0,
    }

    ####################################

    # Default room conditions (temperature in K, relative humidity fractions).
    room = {
        "defaults": {
            "inside_temp": 293,
            "humidity_with_heating": 0.3,
            "humidity_without_heating": 0.5,
        },
    }

    # Ventilation parameters: natural-ventilation discharge coefficient and
    # baseline infiltration air-change contribution.
    ventilation = {
        "natural": {
            "discharge_factor": {
                "sliding": 0.6,
            },
        },
        "infiltration_ventilation": 0.25,
    }

    # Aerosol-particle parameters.
    particle = {
        "evaporation_factor": 0.3,
    }

    # Infected-population parameters.
    population_with_virus = {
        "fraction_of_infectious_virus": 1,
    }

    # Concentration-model parameters, including the CO2 sub-model.
    concentration_model = {
        "min_background_concentration": 0.0,
        "CO2_concentration_model": {
            "CO2_atmosphere_concentration": 440.44,
            "CO2_fraction_exhaled": 0.042,
        },
    }

    # Short-range (close-proximity) jet-model parameters used to compute the
    # dilution factor of the exhaled jet.
    short_range_model = {
        "dilution_factor": {
            "mouth_diameter": 0.02,
            "exhalation_coefficient": 2,
            "tstar": 2,
            "penetration_coefficients": {
                "𝛽r1": 0.18,
                "𝛽r2": 0.2,
                "𝛽x1": 2.4,
            },
        },
    }

    # Exposure-model parameters.
    exposure_model = {
        "repeats": 1,
    }

    # Percentile/viral-load bounds for the conditional probability of
    # infection given viral load.
    conditional_prob_inf_given_viral_load = {
        "lower_percentile": 0.05,
        "upper_percentile": 0.95,
        "min_vl": 2,
        "max_vl": 10,
    }

    # Number of Monte-Carlo samples drawn per simulation.
    monte_carlo_sample_size = 250000

    # Default activity level and expiration mix (type -> weight) for each
    # preset calculator scenario; "precise" is filled in by the user.
    population_scenario_activity = {
        "office": {"activity": "Seated", "expiration": {"Speaking": 1, "Breathing": 2}},
        "smallmeeting": {
            "activity": "Seated",
            "expiration": {"Speaking": 1, "Breathing": None},
        },
        "largemeeting": {
            "activity": "Standing",
            "expiration": {"Speaking": 1, "Breathing": 2},
        },
        "callcenter": {"activity": "Seated", "expiration": {"Speaking": 1}},
        "controlroom-day": {
            "activity": "Seated",
            "expiration": {"Speaking": 1, "Breathing": 1},
        },
        "controlroom-night": {
            "activity": "Seated",
            "expiration": {"Speaking": 1, "Breathing": 9},
        },
        "library": {"activity": "Seated", "expiration": {"Breathing": 1}},
        "lab": {
            "activity": "Light activity",
            "expiration": {"Speaking": 1, "Breathing": 1},
        },
        "workshop": {
            "activity": "Moderate activity",
            "expiration": {"Speaking": 1, "Breathing": 1},
        },
        "training": {"activity": "Standing", "expiration": {"Speaking": 1}},
        "training_attendee": {"activity": "Seated", "expiration": {"Breathing": 1}},
        "gym": {"activity": "Heavy exercise", "expiration": {"Breathing": 1}},
        "household-day": {
            "activity": "Light activity",
            "expiration": {"Breathing": 5, "Speaking": 5},
        },
        "household-night": {
            "activity": "Seated",
            "expiration": {"Breathing": 7, "Speaking": 3},
        },
        "primary-school": {
            "activity": "Light activity",
            "expiration": {"Breathing": 5, "Speaking": 5},
        },
        "secondary-school": {
            "activity": "Light activity",
            "expiration": {"Breathing": 7, "Speaking": 3},
        },
        "university": {
            "activity": "Seated",
            "expiration": {"Breathing": 9, "Speaking": 1},
        },
        "restaurant": {
            "activity": "Seated",
            "expiration": {"Breathing": 1, "Speaking": 9},
        },
        "precise": {"activity": '', "expiration": {}},
    }

    def update(self, data: dict) -> None:
        """Update local cache with data provided as argument.

        ``data`` maps attribute names (matching the class attributes above)
        to their replacement values; each entry overwrites the cached default
        on this instance via ``setattr``.
        """
        for attr_name, value in data.items():
            setattr(self, attr_name, value)
# Module-level instance acting as the configuration singleton: import and
# share this object rather than instantiating Configuration directly.
config = Configuration()

View file

@ -0,0 +1,95 @@
import logging
import os
import typing
import requests
from .configuration import config
logger = logging.getLogger(__name__)
class DataService:
    """Responsible for fetching data from the data service endpoint.

    Handles authentication (POST of the email/password credentials to
    ``/login`` to obtain a bearer token) and retrieval of the data payload
    from ``/data``.  Network/HTTP failures are logged and surface to the
    caller as a ``None`` return value rather than an exception.
    """

    # Cached access token, reused while still valid.
    _access_token: typing.Optional[str] = None

    def __init__(
        self,
        credentials: typing.Dict[str, str],
        host: str = "https://caimira-data-api.app.cern.ch",
    ):
        """
        :param credentials: mapping with ``"email"`` and ``"password"`` keys.
        :param host: base URL of the data service API.
        """
        self._credentials = credentials
        self._host = host

    def _is_valid(self, access_token) -> bool:
        """Return whether the cached access token is still usable.

        NOTE(review): placeholder — decoding the token and checking its
        expiry is not implemented yet, so every call currently forces a
        fresh login.
        """
        # TODO: decode access_token and check its validity.
        return False

    def _login(self) -> typing.Optional[str]:
        """Return an access token, logging in again if the cached one is stale.

        Returns ``None`` when the login request fails (the failure is logged).

        :raises Exception: if the email or password credential is not set.
        """
        if self._is_valid(self._access_token):
            return self._access_token

        # Invalid (or absent) access token: fetch a fresh one.
        client_email = self._credentials["email"]
        client_password = self._credentials["password"]
        if client_email is None or client_password is None:
            # If the credentials are not defined, an exception is raised.
            raise Exception("DataService credentials not set")

        url = f"{self._host}/login"
        headers = {"Content-Type": "application/json"}
        json_body = dict(email=client_email, password=client_password)
        try:
            response = requests.post(url, json=json_body, headers=headers)
            response.raise_for_status()
            if response.status_code == 200:
                self._access_token = response.json()["access_token"]
                return self._access_token
            else:
                logger.error(
                    f"Unexpected error on login. Response status code: {response.status_code}, body: {response.text}"
                )
        except requests.exceptions.RequestException as e:
            logger.exception(e)
        return None

    def fetch(self) -> typing.Optional[dict]:
        """Return the JSON payload from ``/data``, or ``None`` on failure."""
        access_token = self._login()
        if access_token is None:
            # Login already failed and was logged: don't issue a request
            # that is guaranteed to be rejected.
            return None
        headers = {
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/json",
        }
        url = f"{self._host}/data"
        try:
            response = requests.get(url, headers=headers)
            response.raise_for_status()
            if response.status_code == 200:
                return response.json()
            else:
                logger.error(
                    f"Unexpected error when fetching data. Response status code: {response.status_code}, body: {response.text}"
                )
        except requests.exceptions.RequestException as e:
            logger.exception(e)
        return None
def update_configuration() -> None:
    """Refresh the global ``config`` singleton from the data service.

    The refresh only runs when the ``DATA_SERVICE_ENABLED`` environment
    variable is set to a case-insensitive ``"true"``.  Credentials are read
    from ``DATA_SERVICE_CLIENT_EMAIL`` / ``DATA_SERVICE_CLIENT_PASSWORD``.
    Any failure to obtain a usable payload is logged and the cached defaults
    in ``config`` are left untouched.
    """
    data_service_enabled = os.environ.get("DATA_SERVICE_ENABLED", "False")
    is_enabled = data_service_enabled.lower() == "true"
    if not is_enabled:
        # Data service integration is opt-in; nothing to do.
        return

    credentials = {
        "email": os.environ.get("DATA_SERVICE_CLIENT_EMAIL", None),
        "password": os.environ.get("DATA_SERVICE_CLIENT_PASSWORD", None),
    }
    data_service = DataService(credentials)
    data = data_service.fetch()
    # Guard both a failed fetch (None) and a payload without the expected
    # "data" key — the original code raised KeyError on the latter.
    if data and "data" in data:
        config.update(data["data"])
    else:
        logger.error("Could not fetch fresh data from the data service.")

View file

@ -1,87 +1,81 @@
from dataclasses import dataclass
import unittest
from unittest.mock import patch, MagicMock
from tornado.httpclient import HTTPError
from unittest.mock import Mock, patch
from caimira.apps.calculator.data_service import DataService
from caimira.store.data_service import DataService
@dataclass
class MockResponse:
body: str
class DataServiceTests(unittest.TestCase):
def setUp(self):
# Set up any necessary test data or configurations
self.credentials = {
"data_service_client_email": "test@example.com",
"data_service_client_password": "password123"
}
self.credentials = {"email": "test@example.com", "password": "password123"}
self.data_service = DataService(self.credentials)
@patch('caimira.apps.calculator.data_service.AsyncHTTPClient')
async def test_login_successful(self, mock_http_client):
@patch("requests.post")
def test_login_successful(self, mock_post):
# Mock successful login response
mock_response = MockResponse('{"access_token": "dummy_token"}')
mock_fetch = MagicMock(return_value=mock_response)
mock_http_client.return_value.fetch = mock_fetch
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"access_token": "dummy_token"}
mock_post.return_value = mock_response
# Call the login method
access_token = await self.data_service._login()
access_token = self.data_service._login()
# Assert that the access token is returned correctly
self.assertEqual(access_token, "dummy_token")
# Verify that the fetch method was called with the expected arguments
mock_fetch.assert_called_once_with(
url='https://caimira-data-api.app.cern.ch/login',
method='POST',
headers={'Content-type': 'application/json'},
body='{"email": "test@example.com", "password": "password123"}'
mock_post.assert_called_once_with(
"https://caimira-data-api.app.cern.ch/login",
json=dict(email="test@example.com", password="password123"),
headers={"Content-Type": "application/json"},
)
@patch('caimira.apps.calculator.data_service.AsyncHTTPClient')
async def test_login_error(self, mock_http_client):
@patch("requests.post")
def test_login_error(self, mock_post):
# Mock login error response
mock_fetch = MagicMock(side_effect=HTTPError(500))
mock_http_client.return_value.fetch = mock_fetch
mock_post.return_value = Mock()
mock_post.return_value.status_code = 500
# Call the login method
access_token = await self.data_service.login()
access_token = self.data_service._login()
# Assert that the login method returns None in case of an error
self.assertIsNone(access_token)
@patch('caimira.apps.calculator.data_service.AsyncHTTPClient')
async def test_fetch_successful(self, mock_http_client):
@patch("requests.get")
@patch.object(DataService, "_login")
def test_fetch_successful(self, mock_login, mock_get):
# Mock successful fetch response
mock_response = MockResponse('{"data": "dummy_data"}')
mock_fetch = MagicMock(return_value=mock_response)
mock_http_client.return_value.fetch = mock_fetch
mock_get.return_value = Mock()
mock_get.return_value.status_code = 200
mock_get.return_value.json.return_value = {"data": "dummy_data"}
# Call the fetch method with a mock access token
self.data_service._access_token = "dummy_token"
data = await self.data_service.fetch()
mock_login.return_value = "dummy_token"
data = self.data_service.fetch()
# Assert that the data is returned correctly
self.assertEqual(data, {"data": "dummy_data"})
# Verify that the fetch method was called with the expected arguments
mock_fetch.assert_called_once_with(
url='https://caimira-data-api.app.cern.ch/data',
method='GET',
headers={'Authorization': 'Bearer dummy_token'}
mock_get.assert_called_once_with(
"https://caimira-data-api.app.cern.ch/data",
headers={
"Authorization": "Bearer dummy_token",
"Content-Type": "application/json",
},
)
@patch('caimira.apps.calculator.data_service.AsyncHTTPClient')
async def test_fetch_error(self, mock_http_client):
@patch("requests.get")
@patch.object(DataService, "_login")
def test_fetch_error(self, mock_login, mock_get):
# Mock fetch error response
mock_fetch = MagicMock(side_effect=HTTPError(404))
mock_http_client.return_value.fetch = mock_fetch
mock_get.return_value = Mock()
mock_get.return_value.status_code = 500
# Call the fetch method with a mock access token
self.data_service._access_token = "dummy_token"
data = await self.data_service.fetch()
mock_login.return_value = "dummy_token"
data = self.data_service.fetch()
# Assert that the fetch method returns None in case of an error
self.assertIsNone(data)

View file

@ -48,6 +48,7 @@ REQUIREMENTS: dict = {
'numpy-stubs @ git+https://github.com/numpy/numpy-stubs.git',
'types-dataclasses',
'types-python-dateutil',
'types-requests',
],
'dev': [
'jupyterlab',