
Analysis API


Public analysis helpers for the unified API.

AnalysisResult dataclass

Lightweight container used by Monte Carlo helpers for legacy traces.

Source code in spicelab/analysis/result.py
@dataclass(frozen=True)
class AnalysisResult:
    """Lightweight container used by Monte Carlo helpers for legacy traces."""

    run: RunResult
    traces: TraceSet

ENOBSpec dataclass

Effective number of bits estimated from SINAD of a sine wave.

ENOB = (SINAD_dB - 1.76) / 6.02

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class ENOBSpec:
    """Effective number of bits estimated from SINAD of a sine wave.

    ENOB = (SINAD_dB - 1.76) / 6.02
    """

    name: str
    signal: str
    harmonics: int = 5
    f0: float | None = None
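
As a quick illustration of the formula above, here is a minimal sketch (plain Python, no spicelab dependencies) that converts a SINAD figure into ENOB:

    def enob_from_sinad(sinad_db: float) -> float:
        """ENOB = (SINAD_dB - 1.76) / 6.02."""
        return (sinad_db - 1.76) / 6.02

    # A SINAD of 74 dB corresponds to roughly 12 effective bits.
    print(round(enob_from_sinad(74.0), 2))  # ~12.0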

GainBandwidthSpec dataclass

Unity-gain frequency for H = numerator/denominator (GBW for open-loop A).

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class GainBandwidthSpec:
    """Unity-gain frequency for H = numerator/denominator (GBW for open-loop A)."""

    name: str
    numerator: str
    denominator: str

GainMarginSpec dataclass

Gain margin at phase = -180° (mod 360) for H = numerator/denominator.

Returns the classical GM in dB: GM_dB = -20*log10(|H|) evaluated at the phase-crossing (closest sample to -180° modulo 360). If no sample is within tolerance_deg of -180°, returns +inf (no crossing within range).

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class GainMarginSpec:
    """Gain margin at phase = -180° (mod 360) for H = numerator/denominator.

    Returns the classical GM in dB: GM_dB = -20*log10(|H|) evaluated at the
    phase-crossing (closest sample to -180° modulo 360). If no sample is within
    `tolerance_deg` of -180°, returns +inf (no crossing within range).
    """

    name: str
    numerator: str
    denominator: str
    tolerance_deg: float = 15.0

GainSpec dataclass

Measure the small-signal gain at a given frequency.

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class GainSpec:
    """Measure the small-signal gain at a given frequency."""

    name: str
    numerator: str
    freq: float
    denominator: str | None = None
    kind: Literal["mag", "db"] = "db"

MonteCarloResult dataclass

Source code in spicelab/analysis/montecarlo.py
@dataclass(frozen=True)
class MonteCarloResult:
    samples: list[dict[str, float]]
    runs: list[AnalysisResult]
    # optional metadata about the varied parameters: list of (label, nominal, dist_repr)
    mapping_manifest: list[tuple[str, float, str]] | None = None
    handles: list[ResultHandle] | None = None
    job: JobResult | None = None

    def result_handles(self) -> list[ResultHandle]:
        """Return the list of ResultHandle objects backing each run (if available)."""

        return list(self.handles or [])

    def to_dataframe(
        self,
        metric: (
            Callable[[AnalysisResult], float | dict[str, Any]]
            | TMapping[str, Callable[[AnalysisResult], Any]]
            | None
        ) = None,
        *,
        trial_name: str = "trial",
        param_prefix: str = "",
        y: Sequence[str] | None = None,
        sample_at: float | None = None,
    ) -> Any:
        """
        Returns a per-trial DataFrame with columns:
          - trial (index within this Monte Carlo run)
          - one column per sampled parameter (from `samples`), optionally prefixed
          - optional metric columns computed from each AnalysisResult
          - optional raw trace columns (final value or sampled at `sample_at` seconds)

        metric:
          - callable → result stored in column 'metric' (float or scalar)
          - mapping name->callable → adds one column per metric name
        y: list of trace names to extract values for each run. If `sample_at` is given,
           the value is linearly interpolated at t=sample_at using the run's time axis;
           otherwise, the last value in the trace is used.
        """
        try:
            pd: Any = importlib.import_module("pandas")
        except Exception:  # pragma: no cover
            pd = None

        rows: list[dict[str, Any]] = []
        for i, (s, run) in enumerate(zip(self.samples, self.runs, strict=False)):
            # copy sampled params; optionally add prefix
            if param_prefix:
                row = {f"{param_prefix}{k}": v for k, v in s.items()}
            else:
                row = dict(s)
            row[trial_name] = i
            if metric is not None:
                if hasattr(metric, "items"):
                    for name, fn in cast(
                        TMapping[str, Callable[[AnalysisResult], Any]], metric
                    ).items():
                        row[name] = fn(run)
                else:
                    m = cast(Callable[[AnalysisResult], Any], metric)(run)
                    if isinstance(m, dict):
                        row.update(m)
                    else:
                        row["metric"] = m

            if y:
                try:
                    import numpy as _np  # local import to avoid hard dep at module import
                except Exception:  # pragma: no cover
                    _np = None  # type: ignore[assignment]

                ts = run.traces
                # pick x axis name
                xname = getattr(ts.x, "name", "time")
                for name in y:
                    vals = ts[name].values
                    if sample_at is not None and _np is not None and xname.lower() == "time":
                        t = ts[xname].values
                        row[name] = float(_np.interp(sample_at, t, vals))
                    else:
                        row[name] = (
                            float(vals[-1]) if len(vals) else _np.nan if _np is not None else 0.0
                        )
            rows.append(row)
        if pd is None:
            return _MiniDataFrame(rows)
        return pd.DataFrame(rows)

    def to_csv(
        self,
        path: str,
        metric: (
            Callable[[AnalysisResult], float | dict[str, Any]]
            | TMapping[str, Callable[[AnalysisResult], Any]]
            | None
        ) = None,
        *,
        trial_name: str = "trial",
        param_prefix: str = "",
        y: Sequence[str] | None = None,
        sample_at: float | None = None,
        columns: Sequence[str] | None = None,
        index: bool = False,
        **to_csv_kwargs: Any,
    ) -> None:
        """Write the Monte Carlo per-trial table to CSV.

        - `path`: output file path (passed to pandas.DataFrame.to_csv).
        - `metric`, `trial_name`, `param_prefix`, `y`, `sample_at` are forwarded
          to :meth:`to_dataframe` and behave the same.
        - `columns`: optional sequence of column names to keep (order preserved).
        - `index`: whether to write the DataFrame index (default False).
        - `to_csv_kwargs`: additional keyword args passed to pandas.DataFrame.to_csv.

        Raises RuntimeError if pandas is not available.
        """
        try:
            importlib.import_module("pandas")
        except Exception as exc:  # pragma: no cover
            raise RuntimeError("pandas is required for MonteCarloResult.to_csv()") from exc

        df = self.to_dataframe(
            metric=metric,
            trial_name=trial_name,
            param_prefix=param_prefix,
            y=y,
            sample_at=sample_at,
        )
        if columns is not None:
            df = df.loc[:, list(columns)]
        df.to_csv(path, index=index, **to_csv_kwargs)

    def save_samples_csv(
        self, path: str, *, param_prefix: str = "", index: bool = False, **to_csv_kwargs: Any
    ) -> None:
        """Write only the sampled parameters (and trial index) to CSV.

        This is a convenience helper that writes the per-trial sampled parameters
        (the entries produced when generating the Monte Carlo `samples`) to a CSV
        file. Columns are the sampled parameter names (optionally prefixed) and
        the trial column named 'trial'.
        """
        try:
            importlib.import_module("pandas")
        except Exception as exc:  # pragma: no cover
            raise RuntimeError(
                "pandas is required for MonteCarloResult.save_samples_csv()"
            ) from exc

        df = self.to_dataframe(metric=None, trial_name="trial", param_prefix=param_prefix, y=None)
        df.to_csv(path, index=index, **to_csv_kwargs)

    def save_manifest_csv(self, path: str, *, index: bool = False, **to_csv_kwargs: Any) -> None:
        """Write a small manifest describing the varied parameters to CSV.

        The manifest columns are: label, nominal, dist. The manifest is taken from
        `mapping_manifest` populated by the `monte_carlo` helper when available.
        """
        try:
            importlib.import_module("pandas")
        except Exception as exc:  # pragma: no cover
            raise RuntimeError(
                "pandas is required for MonteCarloResult.save_manifest_csv()"
            ) from exc

        if not self.mapping_manifest:
            # nothing to write
            return

        import pandas as pd  # local import; optional runtime dependency

        df = pd.DataFrame(self.mapping_manifest, columns=["label", "nominal", "dist"])
        df.to_csv(path, index=index, **to_csv_kwargs)
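
A brief usage sketch, assuming mc is a MonteCarloResult returned by the monte_carlo helper, the runs contain a trace named V(out), and pandas is installed (otherwise to_dataframe falls back to a minimal table object).

    # One row per trial: sampled parameters plus derived columns.
    df = mc.to_dataframe(
        metric={"vout_final": lambda run: run.traces["V(out)"].values[-1]},
        y=["V(out)"],      # also keep a raw trace value per trial
        sample_at=1e-3,    # interpolate V(out) at t = 1 ms (requires a time axis)
    )
    print(df.head())

    # Persist the same table (pandas required).
    mc.to_csv(
        "mc_runs.csv",
        metric={"vout_final": lambda run: run.traces["V(out)"].values[-1]},
    )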

result_handles()

Return the list of ResultHandle objects backing each run (if available).

Source code in spicelab/analysis/montecarlo.py
def result_handles(self) -> list[ResultHandle]:
    """Return the list of ResultHandle objects backing each run (if available)."""

    return list(self.handles or [])

save_manifest_csv(path, *, index=False, **to_csv_kwargs)

Write a small manifest describing the varied parameters to CSV.

The manifest columns are: label, nominal, dist. The manifest is taken from mapping_manifest populated by the monte_carlo helper when available.

Source code in spicelab/analysis/montecarlo.py
def save_manifest_csv(self, path: str, *, index: bool = False, **to_csv_kwargs: Any) -> None:
    """Write a small manifest describing the varied parameters to CSV.

    The manifest columns are: label, nominal, dist. The manifest is taken from
    `mapping_manifest` populated by the `monte_carlo` helper when available.
    """
    try:
        importlib.import_module("pandas")
    except Exception as exc:  # pragma: no cover
        raise RuntimeError(
            "pandas is required for MonteCarloResult.save_manifest_csv()"
        ) from exc

    if not self.mapping_manifest:
        # nothing to write
        return

    import pandas as pd  # local import; optional runtime dependency

    df = pd.DataFrame(self.mapping_manifest, columns=["label", "nominal", "dist"])
    df.to_csv(path, index=index, **to_csv_kwargs)

save_samples_csv(path, *, param_prefix='', index=False, **to_csv_kwargs)

Write only the sampled parameters (and trial index) to CSV.

This is a convenience helper that writes the per-trial sampled parameters (the entries produced when generating the Monte Carlo samples) to a CSV file. Columns are the sampled parameter names (optionally prefixed) and the trial column named 'trial'.

Source code in spicelab/analysis/montecarlo.py
def save_samples_csv(
    self, path: str, *, param_prefix: str = "", index: bool = False, **to_csv_kwargs: Any
) -> None:
    """Write only the sampled parameters (and trial index) to CSV.

    This is a convenience helper that writes the per-trial sampled parameters
    (the entries produced when generating the Monte Carlo `samples`) to a CSV
    file. Columns are the sampled parameter names (optionally prefixed) and
    the trial column named 'trial'.
    """
    try:
        importlib.import_module("pandas")
    except Exception as exc:  # pragma: no cover
        raise RuntimeError(
            "pandas is required for MonteCarloResult.save_samples_csv()"
        ) from exc

    df = self.to_dataframe(metric=None, trial_name="trial", param_prefix=param_prefix, y=None)
    df.to_csv(path, index=index, **to_csv_kwargs)

to_csv(path, metric=None, *, trial_name='trial', param_prefix='', y=None, sample_at=None, columns=None, index=False, **to_csv_kwargs)

Write the Monte Carlo per-trial table to CSV.

  • path: output file path (passed to pandas.DataFrame.to_csv).
  • metric, trial_name, param_prefix, y, sample_at are forwarded to :meth:to_dataframe and behave the same.
  • columns: optional sequence of column names to keep (order preserved).
  • index: whether to write the DataFrame index (default False).
  • to_csv_kwargs: additional keyword args passed to pandas.DataFrame.to_csv.

Raises RuntimeError if pandas is not available.

Source code in spicelab/analysis/montecarlo.py
def to_csv(
    self,
    path: str,
    metric: (
        Callable[[AnalysisResult], float | dict[str, Any]]
        | TMapping[str, Callable[[AnalysisResult], Any]]
        | None
    ) = None,
    *,
    trial_name: str = "trial",
    param_prefix: str = "",
    y: Sequence[str] | None = None,
    sample_at: float | None = None,
    columns: Sequence[str] | None = None,
    index: bool = False,
    **to_csv_kwargs: Any,
) -> None:
    """Write the Monte Carlo per-trial table to CSV.

    - `path`: output file path (passed to pandas.DataFrame.to_csv).
    - `metric`, `trial_name`, `param_prefix`, `y`, `sample_at` are forwarded
      to :meth:`to_dataframe` and behave the same.
    - `columns`: optional sequence of column names to keep (order preserved).
    - `index`: whether to write the DataFrame index (default False).
    - `to_csv_kwargs`: additional keyword args passed to pandas.DataFrame.to_csv.

    Raises RuntimeError if pandas is not available.
    """
    try:
        importlib.import_module("pandas")
    except Exception as exc:  # pragma: no cover
        raise RuntimeError("pandas is required for MonteCarloResult.to_csv()") from exc

    df = self.to_dataframe(
        metric=metric,
        trial_name=trial_name,
        param_prefix=param_prefix,
        y=y,
        sample_at=sample_at,
    )
    if columns is not None:
        df = df.loc[:, list(columns)]
    df.to_csv(path, index=index, **to_csv_kwargs)

to_dataframe(metric=None, *, trial_name='trial', param_prefix='', y=None, sample_at=None)

Returns a per-trial DataFrame with columns:
  • trial (index within this Monte Carlo run)
  • one column per sampled parameter (from samples), optionally prefixed
  • optional metric columns computed from each AnalysisResult
  • optional raw trace columns (final value or sampled at sample_at seconds)
metric:
  • callable → result stored in column 'metric' (float or scalar)
  • mapping name->callable → adds one column per metric name

y: list of trace names to extract values for each run. If sample_at is given, the value is linearly interpolated at t=sample_at using the run's time axis; otherwise, the last value in the trace is used.

Source code in spicelab/analysis/montecarlo.py
def to_dataframe(
    self,
    metric: (
        Callable[[AnalysisResult], float | dict[str, Any]]
        | TMapping[str, Callable[[AnalysisResult], Any]]
        | None
    ) = None,
    *,
    trial_name: str = "trial",
    param_prefix: str = "",
    y: Sequence[str] | None = None,
    sample_at: float | None = None,
) -> Any:
    """
    Returns a per-trial DataFrame with columns:
      - trial (index within this Monte Carlo run)
      - one column per sampled parameter (from `samples`), optionally prefixed
      - optional metric columns computed from each AnalysisResult
      - optional raw trace columns (final value or sampled at `sample_at` seconds)

    metric:
      - callable → result stored in column 'metric' (float or scalar)
      - mapping name->callable → adds one column per metric name
    y: list of trace names to extract values for each run. If `sample_at` is given,
       the value is linearly interpolated at t=sample_at using the run's time axis;
       otherwise, the last value in the trace is used.
    """
    try:
        pd: Any = importlib.import_module("pandas")
    except Exception:  # pragma: no cover
        pd = None

    rows: list[dict[str, Any]] = []
    for i, (s, run) in enumerate(zip(self.samples, self.runs, strict=False)):
        # copy sampled params; optionally add prefix
        if param_prefix:
            row = {f"{param_prefix}{k}": v for k, v in s.items()}
        else:
            row = dict(s)
        row[trial_name] = i
        if metric is not None:
            if hasattr(metric, "items"):
                for name, fn in cast(
                    TMapping[str, Callable[[AnalysisResult], Any]], metric
                ).items():
                    row[name] = fn(run)
            else:
                m = cast(Callable[[AnalysisResult], Any], metric)(run)
                if isinstance(m, dict):
                    row.update(m)
                else:
                    row["metric"] = m

        if y:
            try:
                import numpy as _np  # local import to avoid hard dep at module import
            except Exception:  # pragma: no cover
                _np = None  # type: ignore[assignment]

            ts = run.traces
            # pick x axis name
            xname = getattr(ts.x, "name", "time")
            for name in y:
                vals = ts[name].values
                if sample_at is not None and _np is not None and xname.lower() == "time":
                    t = ts[xname].values
                    row[name] = float(_np.interp(sample_at, t, vals))
                else:
                    row[name] = (
                        float(vals[-1]) if len(vals) else _np.nan if _np is not None else 0.0
                    )
        rows.append(row)
    if pd is None:
        return _MiniDataFrame(rows)
    return pd.DataFrame(rows)

OvershootSpec dataclass

Measure peak overshoot relative to a target value.

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class OvershootSpec:
    """Measure peak overshoot relative to a target value."""

    name: str
    signal: str
    target: float
    reference: float | None = None
    percent: bool = True

PhaseMarginSpec dataclass

Phase margin at unity-gain crossover of H = numerator/denominator.

Returns the classical PM = 180 + angle(H) [deg] at |H| = 1.

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class PhaseMarginSpec:
    """Phase margin at unity-gain crossover of H = numerator/denominator.

    Returns the classical PM = 180 + angle(H) [deg] at |H| = 1.
    """

    name: str
    numerator: str
    denominator: str

RiseTimeSpec dataclass

10-90 (or custom) rise time between threshold crossings.

Computes the time difference between the first crossings of low and high thresholds, where thresholds are defined relative to baseline/reference and target.

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class RiseTimeSpec:
    """10-90 (or custom) rise time between threshold crossings.

    Computes the time difference between the first crossings of low and high
    thresholds, where thresholds are defined relative to baseline/reference and
    target.
    """

    name: str
    signal: str
    target: float | None = None
    reference: float | None = None
    low_pct: float = 0.1
    high_pct: float = 0.9

SettlingTimeSpec dataclass

Measure when a signal stays within a tolerance band.

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class SettlingTimeSpec:
    """Measure when a signal stays within a tolerance band."""

    name: str
    signal: str
    target: float
    tolerance: float
    tolerance_kind: Literal["abs", "pct"] = "pct"
    start_time: float = 0.0
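
A minimal sketch combining the step-response specs above with the measure helper. It assumes a transient dataset ds containing a trace named v(out) that settles towards 2.5 V; trace names and levels are placeholders.

    from spicelab.analysis.measure import OvershootSpec, RiseTimeSpec, SettlingTimeSpec, measure

    specs = [
        OvershootSpec(name="os", signal="v(out)", target=2.5, percent=True),
        SettlingTimeSpec(name="ts", signal="v(out)", target=2.5, tolerance=0.025, tolerance_kind="abs"),
        RiseTimeSpec(name="tr", signal="v(out)", target=2.5, low_pct=0.1, high_pct=0.9),
    ]

    for row in measure(ds, specs, return_as="python"):
        print(row)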

Statistics dataclass

Statistical summary of a set of values.

Source code in spicelab/analysis/stats.py
@dataclass
class Statistics:
    """Statistical summary of a set of values."""

    n: int
    """Number of samples."""

    mean: float
    """Arithmetic mean."""

    std: float
    """Standard deviation (sample, n-1 denominator)."""

    min: float
    """Minimum value."""

    max: float
    """Maximum value."""

    median: float
    """Median (50th percentile)."""

    p1: float
    """1st percentile."""

    p5: float
    """5th percentile."""

    p95: float
    """95th percentile."""

    p99: float
    """99th percentile."""

    sigma3_low: float
    """Mean - 3*std (lower 3-sigma bound)."""

    sigma3_high: float
    """Mean + 3*std (upper 3-sigma bound)."""

    def __repr__(self) -> str:
        return (
            f"Statistics(n={self.n}, mean={self.mean:.4g}, std={self.std:.4g}, "
            f"range=[{self.min:.4g}, {self.max:.4g}])"
        )

    def cpk(self, lsl: float, usl: float) -> float:
        """Calculate process capability index Cpk.

        Args:
            lsl: Lower specification limit.
            usl: Upper specification limit.

        Returns:
            Cpk value. Higher is better; Cpk >= 1.33 is typically acceptable.
        """
        if self.std == 0:
            return float("inf") if lsl <= self.mean <= usl else 0.0
        cpu = (usl - self.mean) / (3 * self.std)
        cpl = (self.mean - lsl) / (3 * self.std)
        return min(cpu, cpl)

    def yield_estimate(self, lsl: float, usl: float) -> float:
        """Estimate yield (fraction within spec limits).

        Uses normal distribution assumption.

        Args:
            lsl: Lower specification limit.
            usl: Upper specification limit.

        Returns:
            Estimated yield as fraction (0.0 to 1.0).
        """
        if self.std == 0:
            return 1.0 if lsl <= self.mean <= usl else 0.0

        try:
            from scipy.stats import norm

            z_low = (lsl - self.mean) / self.std
            z_high = (usl - self.mean) / self.std
            return float(norm.cdf(z_high) - norm.cdf(z_low))
        except ImportError:
            # Fallback: use empirical estimate if scipy not available
            # This is less accurate but works without scipy
            z_low = (lsl - self.mean) / self.std
            z_high = (usl - self.mean) / self.std
            # Approximate using erf
            return 0.5 * (math.erf(z_high / math.sqrt(2)) - math.erf(z_low / math.sqrt(2)))

max instance-attribute

Maximum value.

mean instance-attribute

Arithmetic mean.

median instance-attribute

Median (50th percentile).

min instance-attribute

Minimum value.

n instance-attribute

Number of samples.

p1 instance-attribute

1st percentile.

p5 instance-attribute

5th percentile.

p95 instance-attribute

95th percentile.

p99 instance-attribute

99th percentile.

sigma3_high instance-attribute

Mean + 3*std (upper 3-sigma bound).

sigma3_low instance-attribute

Mean - 3*std (lower 3-sigma bound).

std instance-attribute

Standard deviation (sample, n-1 denominator).

cpk(lsl, usl)

Calculate process capability index Cpk.

Parameters:

  • lsl (float, required): Lower specification limit.
  • usl (float, required): Upper specification limit.

Returns:

  • float: Cpk value. Higher is better; Cpk >= 1.33 is typically acceptable.

Source code in spicelab/analysis/stats.py
def cpk(self, lsl: float, usl: float) -> float:
    """Calculate process capability index Cpk.

    Args:
        lsl: Lower specification limit.
        usl: Upper specification limit.

    Returns:
        Cpk value. Higher is better; Cpk >= 1.33 is typically acceptable.
    """
    if self.std == 0:
        return float("inf") if lsl <= self.mean <= usl else 0.0
    cpu = (usl - self.mean) / (3 * self.std)
    cpl = (self.mean - lsl) / (3 * self.std)
    return min(cpu, cpl)

yield_estimate(lsl, usl)

Estimate yield (fraction within spec limits).

Uses normal distribution assumption.

Parameters:

  • lsl (float, required): Lower specification limit.
  • usl (float, required): Upper specification limit.

Returns:

  • float: Estimated yield as fraction (0.0 to 1.0).

Source code in spicelab/analysis/stats.py
def yield_estimate(self, lsl: float, usl: float) -> float:
    """Estimate yield (fraction within spec limits).

    Uses normal distribution assumption.

    Args:
        lsl: Lower specification limit.
        usl: Upper specification limit.

    Returns:
        Estimated yield as fraction (0.0 to 1.0).
    """
    if self.std == 0:
        return 1.0 if lsl <= self.mean <= usl else 0.0

    try:
        from scipy.stats import norm

        z_low = (lsl - self.mean) / self.std
        z_high = (usl - self.mean) / self.std
        return float(norm.cdf(z_high) - norm.cdf(z_low))
    except ImportError:
        # Fallback: use empirical estimate if scipy not available
        # This is less accurate but works without scipy
        z_low = (lsl - self.mean) / self.std
        z_high = (usl - self.mean) / self.std
        # Approximate using erf
        return 0.5 * (math.erf(z_high / math.sqrt(2)) - math.erf(z_low / math.sqrt(2)))
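
A short worked sketch tying cpk and yield_estimate together; the measured values and the 2.4 V .. 2.6 V spec limits are made up for illustration.

    from spicelab.analysis.stats import compute_stats

    values = [2.48, 2.51, 2.49, 2.52, 2.50, 2.47, 2.53]
    stats = compute_stats(values)

    print(stats.cpk(2.4, 2.6))             # distance to the nearest limit in 3-sigma units
    print(stats.yield_estimate(2.4, 2.6))  # expected fraction inside the limits (normal assumption)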

THDSpec dataclass

Total Harmonic Distortion of a steady-state tone.

Returns THD in percent. Fundamental can be provided (f0); otherwise the dominant bin (ignoring DC) is used. Uses a Hann window by default via FFT helper.

Source code in spicelab/analysis/measure.py
@dataclass(frozen=True)
class THDSpec:
    """Total Harmonic Distortion of a steady-state tone.

    Returns THD in percent. Fundamental can be provided (f0); otherwise the
    dominant bin (ignoring DC) is used. Uses a Hann window by default via FFT helper.
    """

    name: str
    signal: str
    harmonics: int = 5
    f0: float | None = None

WcaCorner dataclass

Represents a single corner in the WCA analysis.

Source code in spicelab/analysis/wca.py
@dataclass(frozen=True)
class WcaCorner:
    """Represents a single corner in the WCA analysis."""

    combo: dict[str, float]
    """Component ref -> value mapping for this corner."""

    corner_signs: dict[str, Literal[-1, 1]]
    """Component ref -> sign (+1 or -1) indicating tolerance direction."""

    handle: ResultHandle
    """Result handle for this corner's simulation."""

    corner_name: str = ""
    """Human-readable corner name, e.g., 'R1+, R2-'."""

    def dataset(self) -> Any:
        """Return the xarray Dataset for this corner."""
        return self.handle.dataset()

    def traces(self) -> TraceSet:
        """Return TraceSet for this corner."""
        return TraceSet.from_dataset(self.handle.dataset())

combo instance-attribute

Component ref -> value mapping for this corner.

corner_name = '' class-attribute instance-attribute

Human-readable corner name, e.g., 'R1+, R2-'.

corner_signs instance-attribute

Component ref -> sign (+1 or -1) indicating tolerance direction.

handle instance-attribute

Result handle for this corner's simulation.

dataset()

Return the xarray Dataset for this corner.

Source code in spicelab/analysis/wca.py
def dataset(self) -> Any:
    """Return the xarray Dataset for this corner."""
    return self.handle.dataset()

traces()

Return TraceSet for this corner.

Source code in spicelab/analysis/wca.py
def traces(self) -> TraceSet:
    """Return TraceSet for this corner."""
    return TraceSet.from_dataset(self.handle.dataset())

WcaResult dataclass

Result of worst-case analysis.

Contains all corner simulations plus methods to find extremes.

Source code in spicelab/analysis/wca.py
@dataclass(frozen=True)
class WcaResult:
    """Result of worst-case analysis.

    Contains all corner simulations plus methods to find extremes.
    """

    corners: list[WcaCorner]
    """All evaluated corners (2^n for n components)."""

    nominal_combo: dict[str, float]
    """Nominal values for each component."""

    tolerances: dict[str, float]
    """Tolerance (as fraction) for each component."""

    nominal_run: AnalysisResult | None = None
    """The nominal simulation result (optional, run if include_nominal=True)."""

    component_labels: dict[str, str] = field(default_factory=dict)
    """Mapping from component ref to display label."""

    def handles(self) -> list[ResultHandle]:
        """Return all result handles."""
        return [c.handle for c in self.corners]

    def find_extreme(
        self,
        metric: Callable[[WcaCorner], float],
        kind: Literal["min", "max"] = "max",
    ) -> WcaCorner:
        """Find the corner that produces the extreme value for a metric.

        Args:
            metric: Function that extracts a scalar from a WcaCorner.
            kind: "min" or "max" to find minimum or maximum.

        Returns:
            The WcaCorner with the extreme metric value.

        Example:
            # Find corner with maximum Vout
            def get_vout(corner):
                ds = corner.dataset()
                return float(ds['V(vout)'].values[0])

            worst = result.find_extreme(get_vout, kind='max')
        """
        if not self.corners:
            raise ValueError("No corners to search")

        if kind == "min":
            return min(self.corners, key=metric)
        return max(self.corners, key=metric)

    def all_values(
        self,
        metric: Callable[[WcaCorner], float],
    ) -> list[tuple[WcaCorner, float]]:
        """Evaluate a metric for all corners and return sorted results.

        Args:
            metric: Function that extracts a scalar from a WcaCorner.

        Returns:
            List of (corner, value) tuples sorted by value ascending.
        """
        results = [(c, metric(c)) for c in self.corners]
        results.sort(key=lambda x: x[1])
        return results

    def bounds(
        self,
        metric: Callable[[WcaCorner], float],
    ) -> tuple[float, float, float | None]:
        """Get min, max, and nominal values for a metric.

        Args:
            metric: Function that extracts a scalar from a WcaCorner.

        Returns:
            Tuple of (min_value, max_value, nominal_value).
            nominal_value is None if nominal_run was not included.
        """
        values = [metric(c) for c in self.corners]
        nominal = None
        if self.nominal_run is not None:
            # Create a fake corner for nominal to use with metric
            # Actually we need to handle nominal differently
            pass
        return (min(values), max(values), nominal)

    def to_dataframe(
        self,
        metric: Callable[[WcaCorner], float | dict[str, Any]] | None = None,
    ) -> Any:
        """Convert results to a pandas DataFrame.

        Args:
            metric: Optional function to extract metrics from each corner.
                   Can return a single float or a dict of values.

        Returns:
            DataFrame with columns for corner name, component values,
            and optional metric values.
        """
        try:
            import pandas as pd
        except ImportError as err:
            raise RuntimeError("pandas is required for to_dataframe()") from err

        rows = []
        for corner in self.corners:
            row: dict[str, Any] = {
                "corner": corner.corner_name,
            }
            # Add component values
            for ref, value in corner.combo.items():
                label = self.component_labels.get(ref, ref)
                row[label] = value

            # Add metric if provided
            if metric is not None:
                m = metric(corner)
                if isinstance(m, dict):
                    row.update(m)
                else:
                    row["metric"] = m

            rows.append(row)

        return pd.DataFrame(rows)

    def summary(
        self,
        metric: Callable[[WcaCorner], float],
        metric_name: str = "metric",
    ) -> dict[str, Any]:
        """Generate a summary of the WCA results.

        Args:
            metric: Function to extract the metric of interest.
            metric_name: Name of the metric for display.

        Returns:
            Dict with min, max, nominal, range, and worst corners.
        """
        values = self.all_values(metric)
        min_corner, min_val = values[0]
        max_corner, max_val = values[-1]

        nominal_val = None
        if self.nominal_run is not None:
            # Extract nominal from nominal_run traces
            pass

        return {
            "metric_name": metric_name,
            "min_value": min_val,
            "max_value": max_val,
            "range": max_val - min_val,
            "min_corner": min_corner.corner_name,
            "max_corner": max_corner.corner_name,
            "nominal_value": nominal_val,
            "n_corners": len(self.corners),
        }
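
A brief sketch of inspecting a WcaResult, assuming result is a WcaResult whose corner datasets expose a V(vout) variable; the metric function mirrors the find_extreme docstring example.

    def vout(corner):
        ds = corner.dataset()
        return float(ds["V(vout)"].values[0])

    lo, hi, _nominal = result.bounds(vout)
    worst = result.find_extreme(vout, kind="max")
    print(worst.corner_name)
    print(result.summary(vout, metric_name="vout"))
    df = result.to_dataframe(metric=vout)  # pandas required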

component_labels = field(default_factory=dict) class-attribute instance-attribute

Mapping from component ref to display label.

corners instance-attribute

All evaluated corners (2^n for n components).

nominal_combo instance-attribute

Nominal values for each component.

nominal_run = None class-attribute instance-attribute

The nominal simulation result (optional, run if include_nominal=True).

tolerances instance-attribute

Tolerance (as fraction) for each component.

all_values(metric)

Evaluate a metric for all corners and return sorted results.

Parameters:

  • metric (Callable[[WcaCorner], float], required): Function that extracts a scalar from a WcaCorner.

Returns:

  • list[tuple[WcaCorner, float]]: List of (corner, value) tuples sorted by value ascending.

Source code in spicelab/analysis/wca.py
def all_values(
    self,
    metric: Callable[[WcaCorner], float],
) -> list[tuple[WcaCorner, float]]:
    """Evaluate a metric for all corners and return sorted results.

    Args:
        metric: Function that extracts a scalar from a WcaCorner.

    Returns:
        List of (corner, value) tuples sorted by value ascending.
    """
    results = [(c, metric(c)) for c in self.corners]
    results.sort(key=lambda x: x[1])
    return results

bounds(metric)

Get min, max, and nominal values for a metric.

Parameters:

  • metric (Callable[[WcaCorner], float], required): Function that extracts a scalar from a WcaCorner.

Returns:

  • tuple[float, float, float | None]: (min_value, max_value, nominal_value); nominal_value is None if nominal_run was not included.

Source code in spicelab/analysis/wca.py
def bounds(
    self,
    metric: Callable[[WcaCorner], float],
) -> tuple[float, float, float | None]:
    """Get min, max, and nominal values for a metric.

    Args:
        metric: Function that extracts a scalar from a WcaCorner.

    Returns:
        Tuple of (min_value, max_value, nominal_value).
        nominal_value is None if nominal_run was not included.
    """
    values = [metric(c) for c in self.corners]
    nominal = None
    if self.nominal_run is not None:
        # Create a fake corner for nominal to use with metric
        # Actually we need to handle nominal differently
        pass
    return (min(values), max(values), nominal)

find_extreme(metric, kind='max')

Find the corner that produces the extreme value for a metric.

Parameters:

  • metric (Callable[[WcaCorner], float], required): Function that extracts a scalar from a WcaCorner.
  • kind (Literal['min', 'max'], default 'max'): "min" or "max" to find minimum or maximum.

Returns:

  • WcaCorner: The WcaCorner with the extreme metric value.

Example:

    # Find corner with maximum Vout
    def get_vout(corner):
        ds = corner.dataset()
        return float(ds['V(vout)'].values[0])

    worst = result.find_extreme(get_vout, kind='max')

Source code in spicelab/analysis/wca.py
def find_extreme(
    self,
    metric: Callable[[WcaCorner], float],
    kind: Literal["min", "max"] = "max",
) -> WcaCorner:
    """Find the corner that produces the extreme value for a metric.

    Args:
        metric: Function that extracts a scalar from a WcaCorner.
        kind: "min" or "max" to find minimum or maximum.

    Returns:
        The WcaCorner with the extreme metric value.

    Example:
        # Find corner with maximum Vout
        def get_vout(corner):
            ds = corner.dataset()
            return float(ds['V(vout)'].values[0])

        worst = result.find_extreme(get_vout, kind='max')
    """
    if not self.corners:
        raise ValueError("No corners to search")

    if kind == "min":
        return min(self.corners, key=metric)
    return max(self.corners, key=metric)

handles()

Return all result handles.

Source code in spicelab/analysis/wca.py
def handles(self) -> list[ResultHandle]:
    """Return all result handles."""
    return [c.handle for c in self.corners]

summary(metric, metric_name='metric')

Generate a summary of the WCA results.

Parameters:

  • metric (Callable[[WcaCorner], float], required): Function to extract the metric of interest.
  • metric_name (str, default 'metric'): Name of the metric for display.

Returns:

  • dict[str, Any]: Dict with min, max, nominal, range, and worst corners.

Source code in spicelab/analysis/wca.py
def summary(
    self,
    metric: Callable[[WcaCorner], float],
    metric_name: str = "metric",
) -> dict[str, Any]:
    """Generate a summary of the WCA results.

    Args:
        metric: Function to extract the metric of interest.
        metric_name: Name of the metric for display.

    Returns:
        Dict with min, max, nominal, range, and worst corners.
    """
    values = self.all_values(metric)
    min_corner, min_val = values[0]
    max_corner, max_val = values[-1]

    nominal_val = None
    if self.nominal_run is not None:
        # Extract nominal from nominal_run traces
        pass

    return {
        "metric_name": metric_name,
        "min_value": min_val,
        "max_value": max_val,
        "range": max_val - min_val,
        "min_corner": min_corner.corner_name,
        "max_corner": max_corner.corner_name,
        "nominal_value": nominal_val,
        "n_corners": len(self.corners),
    }

to_dataframe(metric=None)

Convert results to a pandas DataFrame.

Parameters:

  • metric (Callable[[WcaCorner], float | dict[str, Any]] | None, default None): Optional function to extract metrics from each corner. Can return a single float or a dict of values.

Returns:

  • Any: DataFrame with columns for corner name, component values, and optional metric values.

Source code in spicelab/analysis/wca.py
def to_dataframe(
    self,
    metric: Callable[[WcaCorner], float | dict[str, Any]] | None = None,
) -> Any:
    """Convert results to a pandas DataFrame.

    Args:
        metric: Optional function to extract metrics from each corner.
               Can return a single float or a dict of values.

    Returns:
        DataFrame with columns for corner name, component values,
        and optional metric values.
    """
    try:
        import pandas as pd
    except ImportError as err:
        raise RuntimeError("pandas is required for to_dataframe()") from err

    rows = []
    for corner in self.corners:
        row: dict[str, Any] = {
            "corner": corner.corner_name,
        }
        # Add component values
        for ref, value in corner.combo.items():
            label = self.component_labels.get(ref, ref)
            row[label] = value

        # Add metric if provided
        if metric is not None:
            m = metric(corner)
            if isinstance(m, dict):
                row.update(m)
            else:
                row["metric"] = m

        rows.append(row)

    return pd.DataFrame(rows)

amplitude_spectrum(x, fs, *, win='hann')

Return (freq, |X(f)|) using rfft_coherent.

The magnitude is 2*|FFT|/sum(window) as in rfft_coherent().

Source code in spicelab/analysis/signal.py
def amplitude_spectrum(
    x: np.ndarray, fs: float, *, win: WindowKind = "hann"
) -> tuple[np.ndarray, np.ndarray]:
    """Return (freq, |X(f)|) using rfft_coherent.

    The magnitude is 2*|FFT|/sum(window) as in rfft_coherent().
    """
    res = rfft_coherent(x, fs, win=win)
    mag = np.abs(res.spectrum)
    # fix DC and Nyquist scaling (factor 2 not applied there in one-sided spectrum)
    if mag.size > 0:
        mag[0] = np.abs(np.fft.rfft(x * window(x.size, win))[0]) / np.sum(window(x.size, win))
    if mag.size > 2 and np.isclose(res.freq[-1], fs / 2):
        mag[-1] = np.abs(np.fft.rfft(x * window(x.size, win))[-1]) / np.sum(window(x.size, win))
    return res.freq, mag
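
A small sketch, using synthetic data and numpy only, showing how the helper is typically called:

    import numpy as np
    from spicelab.analysis.signal import amplitude_spectrum

    fs = 1_000.0                      # sample rate [Hz]
    t = np.arange(0, 1.0, 1.0 / fs)   # 1 s of samples
    x = 0.5 * np.sin(2 * np.pi * 50.0 * t)

    freq, mag = amplitude_spectrum(x, fs, win="hann")
    print(freq[np.argmax(mag[1:]) + 1])  # dominant bin near 50 Hz (skip DC)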

compute_stats(values)

Compute comprehensive statistics for a set of values.

Parameters:

  • values (Sequence[float], required): Sequence of numeric values.

Returns:

  • Statistics: Statistics dataclass with all computed metrics.

Example:

    values = [1.0, 1.1, 0.9, 1.05, 0.95]
    stats = compute_stats(values)
    print(f"Mean: {stats.mean}, Std: {stats.std}")

Source code in spicelab/analysis/stats.py
def compute_stats(values: Sequence[float]) -> Statistics:
    """Compute comprehensive statistics for a set of values.

    Args:
        values: Sequence of numeric values.

    Returns:
        Statistics dataclass with all computed metrics.

    Example:
        values = [1.0, 1.1, 0.9, 1.05, 0.95]
        stats = compute_stats(values)
        print(f"Mean: {stats.mean}, Std: {stats.std}")
    """
    if not values:
        raise ValueError("values must not be empty")

    n = len(values)
    sorted_values = sorted(values)

    # Basic stats
    mean = sum(values) / n

    if n > 1:
        variance = sum((x - mean) ** 2 for x in values) / (n - 1)
        std = math.sqrt(variance)
    else:
        std = 0.0

    min_val = sorted_values[0]
    max_val = sorted_values[-1]

    # Percentiles using linear interpolation
    def percentile(p: float) -> float:
        if n == 1:
            return sorted_values[0]
        idx = (n - 1) * p / 100.0
        lower = int(idx)
        upper = lower + 1
        if upper >= n:
            return sorted_values[-1]
        frac = idx - lower
        return sorted_values[lower] * (1 - frac) + sorted_values[upper] * frac

    return Statistics(
        n=n,
        mean=mean,
        std=std,
        min=min_val,
        max=max_val,
        median=percentile(50),
        p1=percentile(1),
        p5=percentile(5),
        p95=percentile(95),
        p99=percentile(99),
        sigma3_low=mean - 3 * std,
        sigma3_high=mean + 3 * std,
    )

create_metric_extractor(trace_name, *, index=-1, at_time=None)

Create a reusable metric extractor function.

Useful for passing to MonteCarloResult.to_dataframe() or similar methods.

Parameters:

  • trace_name (str, required): Name of the trace.
  • index (int, default -1): Index to extract.
  • at_time (float | None, default None): Time to interpolate at.

Returns:

  • Callable[[AnalysisResult], float]: A callable that extracts the specified value from an AnalysisResult.

Example:

    from spicelab.analysis import monte_carlo, create_metric_extractor

    get_vout = create_metric_extractor('V(vout)')

    mc_result = monte_carlo(...)
    df = mc_result.to_dataframe(metric=get_vout)

Source code in spicelab/analysis/stats.py
def create_metric_extractor(
    trace_name: str,
    *,
    index: int = -1,
    at_time: float | None = None,
) -> Callable[[AnalysisResult], float]:
    """Create a reusable metric extractor function.

    Useful for passing to MonteCarloResult.to_dataframe() or similar methods.

    Args:
        trace_name: Name of the trace.
        index: Index to extract.
        at_time: Time to interpolate at.

    Returns:
        A callable that extracts the specified value from an AnalysisResult.

    Example:
        from spicelab.analysis import monte_carlo, create_metric_extractor

        get_vout = create_metric_extractor('V(vout)')

        mc_result = monte_carlo(...)
        df = mc_result.to_dataframe(metric=get_vout)
    """

    def extractor(result: AnalysisResult) -> float:
        return extract_from_analysis(result, trace_name, index=index, at_time=at_time)

    return extractor

extract_from_analysis(result, trace_name, **kwargs)

Extract a trace value from an AnalysisResult.

Convenience wrapper around extract_trace_value.

Parameters:

  • result (AnalysisResult, required): AnalysisResult containing traces.
  • trace_name (str, required): Name of the trace.
  • **kwargs (Any, default {}): Passed to extract_trace_value.

Returns:

  • float: The extracted scalar value.

Source code in spicelab/analysis/stats.py
def extract_from_analysis(
    result: AnalysisResult,
    trace_name: str,
    **kwargs: Any,
) -> float:
    """Extract a trace value from an AnalysisResult.

    Convenience wrapper around extract_trace_value.

    Args:
        result: AnalysisResult containing traces.
        trace_name: Name of the trace.
        **kwargs: Passed to extract_trace_value.

    Returns:
        The extracted scalar value.
    """
    return extract_trace_value(result.traces, trace_name, **kwargs)

extract_trace_value(traces, trace_name, *, index=-1, at_time=None)

Extract a scalar value from a trace.

Parameters:

  • traces (TraceSet, required): TraceSet containing simulation results.
  • trace_name (str, required): Name of the trace (e.g., 'V(vout)', 'I(R1)'). Case-insensitive matching is attempted.
  • index (int, default -1): Index to extract if at_time is not specified. Default -1 extracts the final value.
  • at_time (float | None, default None): If specified, interpolate the value at this time.

Returns:

  • float: The extracted scalar value.

Raises:

  • KeyError: If trace_name is not found (after case-insensitive search).

Example:

    # Get final Vout from OP analysis
    vout = extract_trace_value(traces, 'V(vout)')

    # Get value at t=1ms from transient
    vout_1ms = extract_trace_value(traces, 'V(vout)', at_time=1e-3)

Source code in spicelab/analysis/stats.py
def extract_trace_value(
    traces: TraceSet,
    trace_name: str,
    *,
    index: int = -1,
    at_time: float | None = None,
) -> float:
    """Extract a scalar value from a trace.

    Args:
        traces: TraceSet containing simulation results.
        trace_name: Name of the trace (e.g., 'V(vout)', 'I(R1)').
                   Case-insensitive matching is attempted.
        index: Index to extract if at_time is not specified.
               Default -1 extracts the final value.
        at_time: If specified, interpolate the value at this time.

    Returns:
        The extracted scalar value.

    Raises:
        KeyError: If trace_name is not found (after case-insensitive search).

    Example:
        # Get final Vout from OP analysis
        vout = extract_trace_value(traces, 'V(vout)')

        # Get value at t=1ms from transient
        vout_1ms = extract_trace_value(traces, 'V(vout)', at_time=1e-3)
    """
    # Get available trace names
    available_names = traces.names

    # Try exact match first
    if trace_name in available_names:
        values = traces[trace_name].values
    else:
        # Try case-insensitive match
        trace_lower = trace_name.lower()
        found = None
        for name in available_names:
            if name.lower() == trace_lower:
                found = name
                break
        if found is None:
            # Try partial match (e.g., 'vout' matches 'V(vout)')
            for name in available_names:
                if trace_lower in name.lower():
                    found = name
                    break
        if found is None:
            raise KeyError(f"Trace '{trace_name}' not found. Available: {available_names}")
        values = traces[found].values

    if at_time is not None:
        try:
            import numpy as np

            # Get time array
            time_name = None
            for name in ["time", "Time", "TIME"]:
                if name in available_names:
                    time_name = name
                    break
            if time_name is None:
                # Try x attribute
                if hasattr(traces, "x") and traces.x is not None:
                    time_arr = traces.x.values
                else:
                    raise ValueError("No time axis found for interpolation")
            else:
                time_arr = traces[time_name].values

            return float(np.interp(at_time, time_arr, values))
        except ImportError as err:
            raise RuntimeError("numpy is required for time interpolation") from err

    return float(values[index])

mc_summary(runs, trace_name, *, index=-1, at_time=None, lsl=None, usl=None)

Generate a summary of Monte Carlo results for a specific trace.

Parameters:

  • runs (Sequence[AnalysisResult], required): List of AnalysisResult from Monte Carlo.
  • trace_name (str, required): Name of the trace to analyze.
  • index (int, default -1): Index to extract from each trace.
  • at_time (float | None, default None): If specified, interpolate at this time.
  • lsl (float | None, default None): Lower specification limit (optional).
  • usl (float | None, default None): Upper specification limit (optional).

Returns:

  • dict[str, Any]: Dictionary with statistics and optional yield/Cpk metrics.

Example:

    from spicelab.analysis import monte_carlo, mc_summary

    mc_result = monte_carlo(circuit, mapping, n=1000, analyses=...)

    summary = mc_summary(
        mc_result.runs,
        'V(vout)',
        lsl=2.4,
        usl=2.6,
    )

    print(f"Mean Vout: {summary['mean']:.4f} V")
    print(f"Cpk: {summary['cpk']:.2f}")
    print(f"Estimated yield: {summary['yield_pct']:.2f}%")

Source code in spicelab/analysis/stats.py
def mc_summary(
    runs: Sequence[AnalysisResult],
    trace_name: str,
    *,
    index: int = -1,
    at_time: float | None = None,
    lsl: float | None = None,
    usl: float | None = None,
) -> dict[str, Any]:
    """Generate a summary of Monte Carlo results for a specific trace.

    Args:
        runs: List of AnalysisResult from Monte Carlo.
        trace_name: Name of the trace to analyze.
        index: Index to extract from each trace.
        at_time: If specified, interpolate at this time.
        lsl: Lower specification limit (optional).
        usl: Upper specification limit (optional).

    Returns:
        Dictionary with statistics and optional yield/Cpk metrics.

    Example:
        from spicelab.analysis import monte_carlo, mc_summary

        mc_result = monte_carlo(circuit, mapping, n=1000, analyses=...)

        summary = mc_summary(
            mc_result.runs,
            'V(vout)',
            lsl=2.4,
            usl=2.6,
        )

        print(f"Mean Vout: {summary['mean']:.4f} V")
        print(f"Cpk: {summary['cpk']:.2f}")
        print(f"Estimated yield: {summary['yield_pct']:.2f}%")
    """
    values = [extract_from_analysis(run, trace_name, index=index, at_time=at_time) for run in runs]

    stats = compute_stats(values)

    result: dict[str, Any] = {
        "trace": trace_name,
        "n": stats.n,
        "mean": stats.mean,
        "std": stats.std,
        "min": stats.min,
        "max": stats.max,
        "median": stats.median,
        "p1": stats.p1,
        "p5": stats.p5,
        "p95": stats.p95,
        "p99": stats.p99,
        "sigma3_low": stats.sigma3_low,
        "sigma3_high": stats.sigma3_high,
        "range": stats.max - stats.min,
    }

    if lsl is not None and usl is not None:
        result["lsl"] = lsl
        result["usl"] = usl
        result["cpk"] = stats.cpk(lsl, usl)
        result["yield"] = stats.yield_estimate(lsl, usl)
        result["yield_pct"] = stats.yield_estimate(lsl, usl) * 100

        # Count actual failures
        failures = sum(1 for v in values if v < lsl or v > usl)
        result["failures"] = failures
        result["actual_yield_pct"] = (len(values) - failures) / len(values) * 100

    return result

measure_job_result(result, specs, *, return_as='python', param_prefix='param_')

Evaluate measurement specs for each run in a JobResult and aggregate rows.

Each output row includes the measurement fields plus the sweep parameters from the corresponding combo, prefixed by param_prefix (set to None to avoid prefixing). The return type mirrors :func:spicelab.analysis.measure.measure (polars or list[dict]).

Source code in spicelab/analysis/pipeline.py
def measure_job_result(
    result: JobResult,
    specs: Sequence[Spec],
    *,
    return_as: Literal["python", "polars"] = "python",
    param_prefix: str | None = "param_",
) -> Any:
    """Evaluate measurement ``specs`` for each run in a JobResult and aggregate rows.

    Each output row includes the measurement fields plus the sweep parameters from the
    corresponding combo, prefixed by ``param_prefix`` (set to None to avoid prefixing).
    The return type mirrors :func:`spicelab.analysis.measure.measure` (polars or list[dict]).
    """
    all_rows: list[dict[str, Any]] = []
    for run in result.runs:
        ds = run.handle.dataset()
        rows = measure(ds, specs, return_as="python")
        params = _flatten_params(run.combo, prefix=param_prefix)
        for r in rows:
            all_rows.append({**params, **r})
    if return_as == "python":
        return all_rows
    try:
        import polars as pl
    except Exception as exc:  # pragma: no cover - optional dependency
        raise RuntimeError("polars is required when return_as='polars'") from exc
    # For polars, apply a stable column ordering: param_* first (sorted),
    # then measure fields via CLI's order
    from ..cli.measure import _order_columns as _cli_order_columns

    # Union of keys
    seen: set[str] = set()
    keys: list[str] = []
    for r in all_rows:
        for k in r.keys():
            if k not in seen:
                seen.add(k)
                keys.append(k)
    params_cols = sorted(
        [k for k in keys if isinstance(k, str) and k.startswith(str(param_prefix or "param_"))]
    )
    measure_cols = [k for k in keys if k not in params_cols]
    ordered_measure = _cli_order_columns([c for c in measure_cols if isinstance(c, str)])
    cols = params_cols + ordered_measure
    df = pl.DataFrame(all_rows)
    # Keep only existing columns, in the desired order
    existing = [c for c in cols if c in df.columns]
    return df.select(existing)

monte_carlo(circuit, mapping, n, seed=None, label_fn=None, workers=1, progress=None, *, analyses=None, engine='ngspice', cache_dir='.spicelab_cache', reuse_cache=True)

Run a Monte Carlo analysis, varying component values according to the given distributions.
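
Example (a minimal sketch; the circuit, the R1/C1 components, and the AnalysisSpec import path are illustrative assumptions):

from spicelab.analysis import monte_carlo, NormalPct
from spicelab.core.types import AnalysisSpec   # import path assumed

# Assumed: `circuit` was built elsewhere and R1/C1 are two of its components.
result = monte_carlo(
    circuit,
    mapping={R1: NormalPct(0.01), C1: NormalPct(0.05)},
    n=200,
    seed=42,
    analyses=[AnalysisSpec(mode="op")],
    engine="ngspice",
)
print(len(result.runs), "runs")
print(result.samples[0])   # sampled values keyed by component label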

Source code in spicelab/analysis/montecarlo.py
def monte_carlo(
    circuit: Circuit,
    mapping: Mapping[Component, Dist],
    n: int,
    seed: int | None = None,
    label_fn: Callable[[Component], str] | None = None,
    workers: int = 1,
    progress: bool | Callable[[int, int], None] | None = None,
    *,
    analyses: Sequence[AnalysisSpec] | None = None,
    engine: EngineName = "ngspice",
    cache_dir: str | Path | None = ".spicelab_cache",
    reuse_cache: bool = True,
) -> MonteCarloResult:
    """
    Executa Monte Carlo variando valores dos componentes conforme distribuições.
    """
    if analyses is None:
        raise ValueError("Provide 'analyses' when running monte_carlo")

    rnd = _random.Random(seed)

    def _label(c: Component) -> str:
        if label_fn:
            return label_fn(c)
        return f"{type(c).__name__}.{c.ref}"

    comps: list[Component] = list(mapping.keys())
    nominals: list[float] = [_as_float(c.value) for c in comps]
    dists: list[Dist] = [mapping[c] for c in comps]

    ref_lookup: dict[Component, str] = {}
    for comp in comps:
        ref = getattr(comp, "ref", None)
        if ref is None:
            raise ValueError("All components in mapping must have .ref for Monte Carlo jobs")
        ref_lookup[comp] = str(ref)

    samples: list[dict[str, float]] = []
    combos: list[dict[str, float]] = []
    for _ in range(n):
        s: dict[str, float] = {}
        combo: dict[str, float] = {}
        for comp, nominal, dist in zip(comps, nominals, dists, strict=False):
            sampled = dist.sample(nominal, rnd)
            s[_label(comp)] = sampled
            combo[ref_lookup[comp]] = sampled
        samples.append(s)
        combos.append(combo)

    # build optional manifest: list of (label, nominal, dist_repr)
    manifest: list[tuple[str, float, str]] = []
    for c, nom, d in zip(comps, nominals, dists, strict=False):
        try:
            d_repr = repr(d)
        except Exception:
            d_repr = type(d).__name__
        manifest.append((_label(c), nom, d_repr))

    if n <= 0:
        return MonteCarloResult(
            samples=samples,
            runs=[],
            mapping_manifest=manifest,
            handles=[],
            job=None,
        )

    job = Job(
        circuit=circuit,
        analyses=list(analyses),
        engine=engine,
        combos=tuple(dict(combo) for combo in combos),
    )
    job_result = run_job(
        job,
        cache_dir=cache_dir,
        workers=workers,
        progress=progress,
        reuse_cache=reuse_cache,
    )
    handles: list[ResultHandle] = []
    analysis_runs: list[AnalysisResult] = []
    for job_run in job_result.runs:
        handles.append(job_run.handle)
        analysis_runs.append(_handle_to_analysis_result(job_run.handle))
    if len(analysis_runs) != len(samples):
        raise RuntimeError("Mismatch between Monte Carlo samples and job results")
    return MonteCarloResult(
        samples=samples,
        runs=analysis_runs,
        mapping_manifest=manifest,
        handles=handles,
        job=job_result,
    )

power_spectral_density(x, fs, *, win='hann')

Return (freq, PSD) with a simple window-energy normalization.

PSD here is magnitude-squared normalized by ENBW of the window to yield units ~ V^2/Hz.
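
Example (a minimal sketch of the normalization described above; the sample rate, record length, and import path are assumptions):

import numpy as np
from spicelab.analysis.signal import power_spectral_density   # import path assumed

fs = 8192.0
t = np.arange(8192) / fs
x = np.sin(2 * np.pi * 1024.0 * t)   # tone placed exactly on an FFT bin

freq, psd = power_spectral_density(x, fs, win="hann")
print(freq[np.argmax(psd)])   # peak lands at the tone frequency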

Source code in spicelab/analysis/signal.py
def power_spectral_density(
    x: np.ndarray, fs: float, *, win: WindowKind = "hann"
) -> tuple[np.ndarray, np.ndarray]:
    """Return (freq, PSD) with a simple window-energy normalization.

    PSD here is magnitude-squared normalized by ENBW of the window to yield units ~ V^2/Hz.
    """
    n = x.size
    if n == 0:
        raise ValueError("x must be non-empty")
    w = window(n, win)
    enbw = fs * np.sum(w**2) / (np.sum(w) ** 2)
    res = rfft_coherent(x, fs, win=win)
    psd = (np.abs(res.spectrum) ** 2) / enbw
    return res.freq, psd

rfft_coherent(x, fs, *, win='hann')

One-sided FFT with simple coherent gain correction.

Parameters

x: input samples (1D)
fs: sample rate in Hz
win: window type

Notes

  • Applies window and divides by sum(window) to correct coherent gain (CG).
  • Returns frequency axis (0..fs/2) and complex spectrum (one-sided, DC..Nyquist).
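
Example (a minimal sketch of the coherent-gain scaling; the tone parameters and import path are assumptions):

import numpy as np
from spicelab.analysis.signal import rfft_coherent   # import path assumed

fs = 8192.0
t = np.arange(8192) / fs
x = 0.5 * np.sin(2 * np.pi * 440.0 * t)   # 0.5 V-peak tone, exactly on a bin

res = rfft_coherent(x, fs, win="hann")
print(np.abs(res.spectrum).max())   # roughly 0.5 after the CG correction
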
Source code in spicelab/analysis/signal.py
def rfft_coherent(x: np.ndarray, fs: float, *, win: WindowKind = "hann") -> FFTResult:
    """One-sided FFT with simple coherent gain correction.

    Parameters
    ----------
    x: input samples (1D)
    fs: sample rate in Hz
    win: window type

    Notes
    -----
    - Applies window and divides by sum(window) to correct coherent gain (CG).
    - Returns frequency axis (0..fs/2) and complex spectrum (one-sided, DC..Nyquist).
    """
    if x.ndim != 1:
        raise ValueError("x must be 1D")
    n = x.size
    if n == 0:
        raise ValueError("x must be non-empty")
    w = window(n, win)
    cg = np.sum(w)
    xw = x * w
    spec = np.fft.rfft(xw) / cg * 2.0  # scale to roughly preserve amplitude (except DC/Nyquist)
    freq = np.fft.rfftfreq(n, d=1.0 / fs)
    return FFTResult(freq=freq, spectrum=spec)

run_and_measure(job, specs, *, cache_dir='.spicelab_cache', workers=1, reuse_cache=True, return_as='python', param_prefix='param_')

Execute a job with the orchestrator and measure outputs for each combo.

Convenience wrapper around run_job + measure_job_result.
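
Example (a minimal sketch; the circuit, analyses, combos, and the GainSpec import path are illustrative assumptions):

from spicelab.analysis import run_and_measure      # import path assumed (defined in analysis/pipeline.py)
from spicelab.analysis.measure import GainSpec     # import path assumed
from spicelab.orchestrator import Job

# Assumed: `circuit`, `analyses` and the sweep `combos` were built elsewhere.
job = Job(circuit=circuit, analyses=list(analyses), engine="ngspice", combos=tuple(combos))
df = run_and_measure(
    job,
    [GainSpec(name="gain_1k", numerator="V(vout)", freq=1e3)],
    return_as="polars",   # param_* columns first, then the measurement columns
)
print(df)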

Source code in spicelab/analysis/pipeline.py
def run_and_measure(
    job: Job,
    specs: Sequence[Spec],
    *,
    cache_dir: str | None = ".spicelab_cache",
    workers: int = 1,
    reuse_cache: bool = True,
    return_as: Literal["python", "polars"] = "python",
    param_prefix: str | None = "param_",
) -> Any:
    """Execute a job with the orchestrator and measure outputs for each combo.

    Convenience wrapper around :func:`run_job` + :func:`measure_job_result`.
    """
    jr = run_job(job, cache_dir=cache_dir, workers=workers, reuse_cache=reuse_cache)
    return measure_job_result(jr, specs, return_as=return_as, param_prefix=param_prefix)

run_param_grid(circuit, variables, analyses, *, engine='ngspice', progress=None, cache_dir=None, workers=1, reuse_cache=True)

Run a Cartesian product of component.value assignments.

variables: sequence of (component, values) pairs.
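
Example (a minimal sketch; the circuit, the R1/C1 components, and the AnalysisSpec import path are illustrative assumptions):

from spicelab.analysis import run_param_grid       # import path assumed (defined in analysis/sweep_grid.py)
from spicelab.core.types import AnalysisSpec       # import path assumed

grid = run_param_grid(
    circuit,
    variables=[(R1, ["1k", "10k"]), (C1, [1e-9, 1e-8])],   # 2 x 2 = 4 combos
    analyses=[AnalysisSpec(mode="op")],
    engine="ngspice",
)
for run in grid.runs:
    print(run.combo)              # e.g. {'R1': '1k', 'C1': 1e-09}
    ds = run.handle.dataset()     # xarray dataset for that combo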

Source code in spicelab/analysis/sweep_grid.py
def run_param_grid(
    circuit: object,
    variables: Sequence[tuple[Component, Sequence[str | float]]],
    analyses: Sequence[AnalysisSpec],
    *,
    engine: EngineName = "ngspice",
    progress: bool | Callable[[int, int], None] | None = None,
    cache_dir: str | Path | None = None,
    workers: int = 1,
    reuse_cache: bool = True,
) -> GridResult:
    """Run a Cartesian product of component.value assignments.

    variables: sequence of (component, values) pairs.
    """

    # Prepare original values to restore later
    var_map: dict[str, list[str | float]] = {}
    for comp, vals in variables:
        var_map[str(comp.ref)] = list(vals)

    sweep_spec = SweepSpec(variables=var_map)
    job = run_simulation(
        circuit,
        analyses,
        sweep=sweep_spec,
        engine=engine,
        progress=progress,
        cache_dir=cache_dir,
        workers=workers,
        reuse_cache=reuse_cache,
    )
    if not isinstance(job, JobResult):
        raise RuntimeError("Expected JobResult from run_simulation when sweep is provided")
    grid_runs: list[GridRun] = []
    for job_run in job.runs:
        grid_runs.append(GridRun(combo=dict(job_run.combo), handle=job_run.handle))

    return GridResult(runs=grid_runs)

run_value_sweep(circuit, component, values, analyses, *, engine='ngspice', progress=None, cache_dir=None, workers=1, reuse_cache=True)

Run multiple simulations varying a single component value.

  • Mutates component.value for each run; restores the original value at the end.
  • Uses the unified engine API (get_simulator().run(...)).
  • Returns lightweight handles; you can pull xarray datasets from each when needed.
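
Example (a minimal sketch; the circuit, the R1 component, the AnalysisSpec import path, and the 'V(vout)' trace name are illustrative assumptions):

from spicelab.analysis import run_value_sweep      # import path assumed (defined in analysis/sweep_grid.py)
from spicelab.core.types import AnalysisSpec       # import path assumed

sweep = run_value_sweep(
    circuit,
    component=R1,
    values=["1k", "2k2", "4k7"],
    analyses=[AnalysisSpec(mode="op")],
    engine="ngspice",
)
for run in sweep.runs:
    ds = run.handle.dataset()                      # pull the xarray dataset only when needed
    print(run.value, float(ds["V(vout)"].values[0]))
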
Source code in spicelab/analysis/sweep_grid.py
def run_value_sweep(
    circuit: object,
    component: Component,
    values: Sequence[str | float],
    analyses: Sequence[AnalysisSpec],
    *,
    engine: EngineName = "ngspice",
    progress: bool | Callable[[int, int], None] | None = None,
    cache_dir: str | Path | None = None,
    workers: int = 1,
    reuse_cache: bool = True,
) -> SweepResult:
    """Run multiple simulations varying a single component value.

    - Mutates component.value for each run; restores the original value at the end.
    - Uses the unified engine API (get_simulator().run(...)).
    - Returns lightweight handles; you can pull xarray datasets from each when needed.
    """

    sweep_spec = SweepSpec(variables={str(component.ref): list(values)})
    result = run_simulation(
        circuit,
        analyses,
        sweep=sweep_spec,
        engine=engine,
        progress=progress,
        cache_dir=cache_dir,
        workers=workers,
        reuse_cache=reuse_cache,
    )
    if not isinstance(result, JobResult):
        raise RuntimeError("Expected JobResult from run_simulation when sweep is provided")
    runs: list[SweepRun] = []
    for job_run in result.runs:
        value = job_run.combo.get(str(component.ref))
        runs.append(SweepRun(value=value if value is not None else "", handle=job_run.handle))
    return SweepResult(component_ref=str(component.ref), values=list(values), runs=runs)

run_wca(circuit, tolerances, analyses, *, engine='ngspice', include_nominal=True, label_fn=None, workers=1, progress=None, cache_dir='.spicelab_cache', reuse_cache=True)

Run worst-case analysis varying components to their tolerance extremes.

For n components with tolerances, this runs 2^n simulations (all combinations of +tolerance and -tolerance). This gives the absolute worst-case bounds.

Parameters:

  • circuit (Circuit, required): The circuit to analyze.
  • tolerances (Mapping[Component, float], required): Mapping from Component -> tolerance as fraction (e.g., 0.01 for 1%).
  • analyses (Sequence[AnalysisSpec], required): List of analyses to run (e.g., [AnalysisSpec(mode='op')]).
  • engine (EngineName, default 'ngspice'): Simulation engine to use.
  • include_nominal (bool, default True): If True, also run the nominal case.
  • label_fn (Callable[[Component], str] | None, default None): Optional function to generate labels for components.
  • workers (int, default 1): Number of parallel workers.
  • progress (bool | Callable[[int, int], None] | None, default None): Progress callback or True for default progress.
  • cache_dir (str | Path | None, default '.spicelab_cache'): Directory for caching results.
  • reuse_cache (bool, default True): Whether to reuse cached results.

Returns:

  WcaResult: WcaResult containing all corner simulations.

Example

tolerances = {
    resistor1: 0.01,  # 1%
    resistor2: 0.05,  # 5%
}

result = run_wca(
    circuit,
    tolerances=tolerances,
    analyses=[AnalysisSpec(mode='op')],
)

# Find worst case for output voltage
def get_vout(corner):
    return float(corner.dataset()['V(vout)'].values[0])

worst_high = result.find_extreme(get_vout, kind='max')
worst_low = result.find_extreme(get_vout, kind='min')

Source code in spicelab/analysis/wca.py
def run_wca(
    circuit: Circuit,
    tolerances: Mapping[Component, float],
    analyses: Sequence[AnalysisSpec],
    *,
    engine: EngineName = "ngspice",
    include_nominal: bool = True,
    label_fn: Callable[[Component], str] | None = None,
    workers: int = 1,
    progress: bool | Callable[[int, int], None] | None = None,
    cache_dir: str | Path | None = ".spicelab_cache",
    reuse_cache: bool = True,
) -> WcaResult:
    """Run worst-case analysis varying components to their tolerance extremes.

    For n components with tolerances, this runs 2^n simulations (all combinations
    of +tolerance and -tolerance). This gives the absolute worst-case bounds.

    Args:
        circuit: The circuit to analyze.
        tolerances: Mapping from Component -> tolerance as fraction (e.g., 0.01 for 1%).
        analyses: List of analyses to run (e.g., [AnalysisSpec(mode='op')]).
        engine: Simulation engine to use.
        include_nominal: If True, also run the nominal case.
        label_fn: Optional function to generate labels for components.
        workers: Number of parallel workers.
        progress: Progress callback or True for default progress.
        cache_dir: Directory for caching results.
        reuse_cache: Whether to reuse cached results.

    Returns:
        WcaResult containing all corner simulations.

    Example:
        tolerances = {
            resistor1: 0.01,  # 1%
            resistor2: 0.05,  # 5%
        }

        result = run_wca(
            circuit,
            tolerances=tolerances,
            analyses=[AnalysisSpec(mode='op')],
        )

        # Find worst case for output voltage
        def get_vout(corner):
            return float(corner.dataset()['V(vout)'].values[0])

        worst_high = result.find_extreme(get_vout, kind='max')
        worst_low = result.find_extreme(get_vout, kind='min')
    """
    if not tolerances:
        raise ValueError("tolerances must not be empty")

    def _label(c: Component) -> str:
        if label_fn:
            return label_fn(c)
        return f"{type(c).__name__}.{c.ref}"

    # Extract component info
    components = list(tolerances.keys())
    refs = [str(c.ref) for c in components]
    nominals = {str(c.ref): to_float(c.value) for c in components}
    tols = {str(c.ref): tolerances[c] for c in components}
    labels = {str(c.ref): _label(c) for c in components}

    # Generate all corner combinations: each component at +tol or -tol
    # For n components, we have 2^n corners
    n = len(components)
    combos: list[dict[str, float]] = []
    corner_signs_list: list[dict[str, Literal[-1, 1]]] = []
    corner_names: list[str] = []

    for signs in itertools.product([-1, 1], repeat=n):
        combo: dict[str, float] = {}
        corner_signs: dict[str, Literal[-1, 1]] = {}
        name_parts = []

        for ref, sign in zip(refs, signs, strict=False):
            nom = nominals[ref]
            tol = tols[ref]
            value = nom * (1 + sign * tol)
            combo[ref] = value
            corner_signs[ref] = sign  # type: ignore[assignment]
            sign_str = "+" if sign > 0 else "-"
            name_parts.append(f"{labels[ref]}{sign_str}")

        combos.append(combo)
        corner_signs_list.append(corner_signs)
        corner_names.append(", ".join(name_parts))

    # Optionally add nominal
    nominal_result: AnalysisResult | None = None
    if include_nominal:
        combos.insert(0, dict(nominals))
        corner_signs_list.insert(0, {ref: 1 for ref in refs})  # type: ignore[misc]
        corner_names.insert(0, "nominal")

    # Run all simulations using the job system

    # Create sweep spec with explicit combos
    # We need to use the job orchestrator directly for explicit combos
    from ..orchestrator import Job, run_job

    job = Job(
        circuit=circuit,
        analyses=list(analyses),
        engine=engine,
        combos=tuple(combos),
    )

    job_result = run_job(
        job,
        cache_dir=cache_dir,
        workers=workers,
        progress=progress,
        reuse_cache=reuse_cache,
    )

    if len(job_result.runs) != len(combos):
        raise RuntimeError(f"Mismatch: expected {len(combos)} runs, got {len(job_result.runs)}")

    # Build corners list
    corners: list[WcaCorner] = []
    start_idx = 0

    if include_nominal:
        # First run is nominal
        nominal_result = _handle_to_analysis_result(job_result.runs[0].handle)
        start_idx = 1

    for i, job_run in enumerate(job_result.runs[start_idx:], start=start_idx):
        idx = i if include_nominal else i
        corner = WcaCorner(
            combo=combos[idx],
            corner_signs=corner_signs_list[idx],
            handle=job_run.handle,
            corner_name=corner_names[idx],
        )
        corners.append(corner)

    return WcaResult(
        corners=corners,
        nominal_combo=dict(nominals),
        tolerances=dict(tols),
        nominal_run=nominal_result,
        component_labels=labels,
    )

tolerance_to_normal(tolerance, sigma_multiplier=3.0)

Convert a tolerance specification to a NormalPct sigma_pct value.

By default, assumes 3-sigma specification (99.7% of values within tolerance).

Parameters:

  • tolerance (float, required): Tolerance as fraction (e.g., 0.01 for 1%).
  • sigma_multiplier (float, default 3.0): Number of sigmas that correspond to the tolerance. Default 3.0 means 3σ = tolerance.

Returns:

  float: sigma_pct value for use with NormalPct.

Example

from spicelab.analysis import NormalPct, tolerance_to_normal

# 1% tolerance, 3-sigma -> sigma_pct = 0.333%
sigma = tolerance_to_normal(0.01)  # returns 0.00333...
dist = NormalPct(sigma)

Source code in spicelab/analysis/wca.py
def tolerance_to_normal(tolerance: float, sigma_multiplier: float = 3.0) -> float:
    """Convert a tolerance specification to a NormalPct sigma_pct value.

    By default, assumes 3-sigma specification (99.7% of values within tolerance).

    Args:
        tolerance: Tolerance as fraction (e.g., 0.01 for 1%).
        sigma_multiplier: Number of sigmas that correspond to tolerance.
                         Default 3.0 means 3σ = tolerance.

    Returns:
        sigma_pct value for use with NormalPct.

    Example:
        from spicelab.analysis import NormalPct, tolerance_to_normal

        # 1% tolerance, 3-sigma -> sigma_pct = 0.333%
        sigma = tolerance_to_normal(0.01)  # returns 0.00333...
        dist = NormalPct(sigma)
    """
    if tolerance < 0:
        raise ValueError("tolerance must be >= 0")
    if sigma_multiplier <= 0:
        raise ValueError("sigma_multiplier must be > 0")
    return tolerance / sigma_multiplier

tolerance_to_uniform(tolerance)

Convert a tolerance specification to a UniformPct pct value.

For uniform distribution, the tolerance directly maps to the pct parameter.

Parameters:

  • tolerance (float, required): Tolerance as fraction (e.g., 0.01 for 1%).

Returns:

  float: pct value for use with UniformPct.

Example

from spicelab.analysis import UniformPct, tolerance_to_uniform

# 1% tolerance -> uniform distribution ±1%
pct = tolerance_to_uniform(0.01)  # returns 0.01
dist = UniformPct(pct)

Source code in spicelab/analysis/wca.py
def tolerance_to_uniform(tolerance: float) -> float:
    """Convert a tolerance specification to a UniformPct pct value.

    For uniform distribution, the tolerance directly maps to the pct parameter.

    Args:
        tolerance: Tolerance as fraction (e.g., 0.01 for 1%).

    Returns:
        pct value for use with UniformPct.

    Example:
        from spicelab.analysis import UniformPct, tolerance_to_uniform

        # 1% tolerance -> uniform distribution ±1%
        pct = tolerance_to_uniform(0.01)  # returns 0.01
        dist = UniformPct(pct)
    """
    if tolerance < 0:
        raise ValueError("tolerance must be >= 0")
    return tolerance

window(n, kind='hann')

Return a window with n points.

  • rect: ones
  • hann: 0.5 * (1 - cos(2*pi*n/N))
  • hamming: 0.54 - 0.46 * cos(2*pi*n/N)
  • blackman: 0.42 - 0.5*cos(2*pi*n/N) + 0.08*cos(4*pi*n/N)
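
Example (a quick sanity check of the window shapes; the import path is an assumption):

import numpy as np
from spicelab.analysis.signal import window   # import path assumed

w = window(8, "hann")
print(w[0], w[-1])        # hann endpoints are 0
print(float(np.sum(w)))   # sum(w) is the coherent gain used by rfft_coherent
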
Source code in spicelab/analysis/signal.py
def window(n: int, kind: WindowKind = "hann") -> np.ndarray:
    """Return a window with n points.

    - rect: ones
    - hann: 0.5 * (1 - cos(2*pi*n/N))
    - hamming: 0.54 - 0.46 * cos(2*pi*n/N)
    - blackman: 0.42 - 0.5*cos(2*pi*n/N) + 0.08*cos(4*pi*n/N)
    """
    if n <= 0:
        raise ValueError("window length must be > 0")
    if kind == "rect":
        return np.ones(n, dtype=float)
    k = np.arange(n, dtype=float)
    if kind == "hann":
        return 0.5 * (1.0 - np.cos(2.0 * np.pi * k / (n - 1)))
    if kind == "hamming":
        return 0.54 - 0.46 * np.cos(2.0 * np.pi * k / (n - 1))
    if kind == "blackman":
        return (
            0.42
            - 0.5 * np.cos(2.0 * np.pi * k / (n - 1))
            + 0.08 * np.cos(4.0 * np.pi * k / (n - 1))
        )
    raise ValueError(f"unsupported window '{kind}'")