cosmicraysandearthquakes/tests/conftest.py
root e5a812fa14 Initial commit: full analysis pipeline source code
Scripts 01-08 implement the complete cosmic-ray/earthquake correlation
analysis from data ingestion through out-of-sample validation and
combined timeseries sinusoid fitting.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-22 02:45:10 +02:00

122 lines
3.5 KiB
Python

"""Shared pytest fixtures."""
from __future__ import annotations
import textwrap
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
# ---------------------------------------------------------------------------
# Minimal NMDB CSV content (as saved by download_station_year)
# ---------------------------------------------------------------------------
# Hourly OULU count-rate values for 2019-01-01, hours 00..23 (full coverage).
_FULL_DAY_RATES = [
    6850.00, 6860.00, 6840.00, 6855.00, 6870.00, 6845.00,
    6862.00, 6858.00, 6871.00, 6849.00, 6853.00, 6861.00,
    6844.00, 6857.00, 6866.00, 6842.00, 6869.00, 6851.00,
    6863.00, 6847.00, 6875.00, 6843.00, 6867.00, 6854.00,
]
# Built programmatically; yields the exact semicolon-separated NMDB layout:
# a "start_date_time;OULU" header followed by one "<timestamp>;<rate>" row
# per hour, each terminated by a newline.
NMDB_CSV_CONTENT = "start_date_time;OULU\n" + "".join(
    f"2019-01-01 {hour:02d}:59:00;{rate:.2f}\n"
    for hour, rate in enumerate(_FULL_DAY_RATES)
)
# Partial-coverage day: only hours 00-11 are present (12 of 24 valid hours).
_HALF_DAY_RATES = [
    6850.00, 6860.00, 6840.00, 6855.00, 6870.00, 6845.00,
    6862.00, 6858.00, 6871.00, 6849.00, 6853.00, 6861.00,
]
# Same layout as NMDB_CSV_CONTENT but truncated after hour 11.
NMDB_CSV_PARTIAL = "start_date_time;OULU\n" + "".join(
    f"2019-01-01 {hour:02d}:59:00;{rate:.2f}\n"
    for hour, rate in enumerate(_HALF_DAY_RATES)
)
# Two days: day 1 (2019-01-01) has all 24 hours; day 2 (2019-01-02) stops
# after hour 09, i.e. only 10 valid hours.
_TWO_DAYS_DAY1 = [
    6850.00, 6860.00, 6840.00, 6855.00, 6870.00, 6845.00,
    6862.00, 6858.00, 6871.00, 6849.00, 6853.00, 6861.00,
    6844.00, 6857.00, 6866.00, 6842.00, 6869.00, 6851.00,
    6863.00, 6847.00, 6875.00, 6843.00, 6867.00, 6854.00,
]
_TWO_DAYS_DAY2 = [
    6851.00, 6849.00, 6853.00, 6858.00, 6862.00,
    6845.00, 6870.00, 6854.00, 6867.00, 6843.00,
]
# Concatenate both days under a single header, one row per hour.
NMDB_CSV_TWO_DAYS = "start_date_time;OULU\n" + "".join(
    f"2019-01-{day:02d} {hour:02d}:59:00;{rate:.2f}\n"
    for day, rates in ((1, _TWO_DAYS_DAY1), (2, _TWO_DAYS_DAY2))
    for hour, rate in enumerate(rates)
)
@pytest.fixture
def nmdb_csv_file(tmp_path: Path) -> Path:
    """Write a full-coverage NMDB CSV to a temp file and return the path.

    The file contains ``NMDB_CSV_CONTENT``: one header row plus 24 hourly
    OULU count-rate rows for 2019-01-01.
    """
    p = tmp_path / "OULU2019.csv"
    # Pin the encoding: Path.write_text() otherwise uses the locale default,
    # which would make the fixture's bytes platform-dependent.
    p.write_text(NMDB_CSV_CONTENT, encoding="utf-8")
    return p
@pytest.fixture
def nmdb_csv_partial(tmp_path: Path) -> Path:
    """Write a partial-coverage (12/24 hours) NMDB CSV and return the path.

    The file contains ``NMDB_CSV_PARTIAL``: header plus rows for hours
    00-11 of 2019-01-01 only.
    """
    p = tmp_path / "OULU2019_partial.csv"
    # Pin the encoding: Path.write_text() otherwise uses the locale default,
    # which would make the fixture's bytes platform-dependent.
    p.write_text(NMDB_CSV_PARTIAL, encoding="utf-8")
    return p
@pytest.fixture
def nmdb_csv_two_days(tmp_path: Path) -> Path:
    """Write a two-day NMDB CSV (day1=full, day2=10/24 hours) and return the path.

    The file contains ``NMDB_CSV_TWO_DAYS``: header plus 24 rows for
    2019-01-01 and 10 rows (hours 00-09) for 2019-01-02.
    """
    p = tmp_path / "OULU2019_twodays.csv"
    # Pin the encoding: Path.write_text() otherwise uses the locale default,
    # which would make the fixture's bytes platform-dependent.
    p.write_text(NMDB_CSV_TWO_DAYS, encoding="utf-8")
    return p