author     J08nY   2025-10-25 16:21:50 +0200
committer  J08nY   2025-10-25 16:21:50 +0200
commit     e94b083c3d71c2023043bbd20fec6c64b11abb53 (patch)
tree       2269bdfe0afca2a71f283a052da835c6606f6fb7
parent     4bf6a31c00a90cb5aea54c7a37c95f8f3413faaf (diff)
-rw-r--r--   analysis/scalarmults/epare/__init__.py        7
-rw-r--r--   analysis/scalarmults/epare/config.py         73
-rw-r--r--   analysis/scalarmults/epare/error_model.py    33
-rw-r--r--   analysis/scalarmults/epare/mult_results.py    7
-rw-r--r--   analysis/scalarmults/epare/prob_map.py       11
-rw-r--r--   analysis/scalarmults/epare/simulate.py       91
6 files changed, 156 insertions, 66 deletions
diff --git a/analysis/scalarmults/epare/__init__.py b/analysis/scalarmults/epare/__init__.py
index 631c2fd..f6be00a 100644
--- a/analysis/scalarmults/epare/__init__.py
+++ b/analysis/scalarmults/epare/__init__.py
@@ -2,20 +2,15 @@ import itertools
from statsmodels.stats.proportion import proportion_confint
from pyecsca.ec.mult import (
- DoubleAndAddMultiplier,
LTRMultiplier,
RTLMultiplier,
- LadderMultiplier,
BinaryNAFMultiplier,
WindowNAFMultiplier,
SimpleLadderMultiplier,
- DifferentialLadderMultiplier,
CoronMultiplier,
FixedWindowLTRMultiplier,
FullPrecompMultiplier,
ProcessingDirection,
- AccumulationOrder,
- ScalarMultiplier,
SlidingWindowMultiplier,
BGMWMultiplier,
CombMultiplier,
@@ -160,7 +155,7 @@ def _all_mults_with_ctr():
other_ctr = CountermeasureIdent(other_ctr_class, *mults)
result.append(other_ctr)
continue
-
+
mults = [mult] * other_ctr_class.nmults
other_ctr = CountermeasureIdent(other_ctr_class, *mults)
for i in range(1, 2**one_ctr_class.nmults):
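
# A minimal sketch of the enumeration pattern in _all_mults_with_ctr above.
# The loop body is not shown in this hunk, so this interpretation is an
# assumption: iterating i over range(1, 2**nmults) walks every non-empty
# bitmask, letting each of the nmults inner multiplier slots independently
# be wrapped in a countermeasure or left bare.

nmults = 2  # hypothetical slot count, mirroring other_ctr_class.nmults
for i in range(1, 2**nmults):
    wrapped = [bool(i & (1 << j)) for j in range(nmults)]
    print(i, wrapped)  # 1 [True, False] / 2 [False, True] / 3 [True, True]
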
diff --git a/analysis/scalarmults/epare/config.py b/analysis/scalarmults/epare/config.py
index 569a1fa..280e0cd 100644
--- a/analysis/scalarmults/epare/config.py
+++ b/analysis/scalarmults/epare/config.py
@@ -1,12 +1,21 @@
from dataclasses import dataclass
from enum import Enum
-from functools import partial, total_ordering
+from functools import total_ordering
from typing import Any, Optional, Type
-from pyecsca.ec.countermeasures import GroupScalarRandomization, AdditiveSplitting, MultiplicativeSplitting, EuclideanSplitting, BrumleyTuveri, PointBlinding, ScalarMultiplierCountermeasure
+from pyecsca.ec.countermeasures import (
+ GroupScalarRandomization,
+ AdditiveSplitting,
+ MultiplicativeSplitting,
+ EuclideanSplitting,
+ BrumleyTuveri,
+ PointBlinding,
+ ScalarMultiplierCountermeasure,
+)
from pyecsca.ec.mult import ScalarMultiplier
from .error_model import ErrorModel
+
class Composable:
klass: Type
args: list[Any]
@@ -56,9 +65,6 @@ class Composable:
return NotImplemented
return str(self) < str(other)
- def __hash__(self):
- return hash((self.klass, tuple(self.args)), tuple(self.kwargs.keys()), tuple(self.kwargs.values()))
-
@dataclass(frozen=True)
@total_ordering
@@ -99,18 +105,42 @@ class CountermeasureIdent(Composable):
else:
name = "?"
# Only print other Composables as Countermeasures do not have interesting arguments
- args = (",".join(list(map(str, filter(lambda arg: isinstance(arg, Composable), self.args))))) if self.args else ""
+ args = (
+ (
+ ",".join(
+ list(
+ map(
+ str,
+ filter(lambda arg: isinstance(arg, Composable), self.args),
+ )
+ )
+ )
+ )
+ if self.args
+ else ""
+ )
# Same for kwargs
- kwargs = (",".join(f"{k}={v}" for k, v in self.kwargs if isinstance(v, Composable))) if self.kwargs else ""
+ kwargs = (
+ (",".join(f"{k}={v}" for k, v in self.kwargs if isinstance(v, Composable)))
+ if self.kwargs
+ else ""
+ )
return f"{name}({args}{',' if args and kwargs else ''}{kwargs})"
def __repr__(self):
return str(self)
def __hash__(self):
- return hash((self.klass, tuple(self.args), tuple(self.kwargs.keys()), tuple(self.kwargs.values())))
+ return hash(
+ (
+ self.klass,
+ tuple(self.args),
+ tuple(self.kwargs.keys()),
+ tuple(self.kwargs.values()),
+ )
+ )
+
-
@dataclass(frozen=True)
@total_ordering
class MultIdent(Composable):
@@ -133,17 +163,31 @@ class MultIdent(Composable):
def __str__(self):
name = self.klass.__name__.replace("Multiplier", "")
args = (",".join(list(map(str, self.args)))) if self.args else ""
- kwmap = {"recoding_direction": "recode",
- "direction": "dir",
- "width": "w"}
- kwargs = (",".join(f"{kwmap.get(k, k)}:{v.name if isinstance(v, Enum) else str(v)}" for k,v in self.kwargs.items())) if self.kwargs else ""
+ kwmap = {"recoding_direction": "recode", "direction": "dir", "width": "w"}
+ kwargs = (
+ (
+ ",".join(
+ f"{kwmap.get(k, k)}:{v.name if isinstance(v, Enum) else str(v)}"
+ for k, v in self.kwargs.items()
+ )
+ )
+ if self.kwargs
+ else ""
+ )
return f"{name}({args}{',' if args and kwargs else ''}{kwargs})"
def __repr__(self):
return str(self)
def __hash__(self):
- return hash((self.klass, tuple(self.args), tuple(self.kwargs.keys()), tuple(self.kwargs.values())))
+ return hash(
+ (
+ self.klass,
+ tuple(self.args),
+ tuple(self.kwargs.keys()),
+ tuple(self.kwargs.values()),
+ )
+ )
@dataclass(frozen=True)
@@ -161,6 +205,7 @@ class Config:
implementation we care about when reverse-engineering: the multiplier and the countermeasure, we do not
really care about the error model, yet need it when simulating.
"""
+
composition: Composable
error_model: Optional[ErrorModel] = None
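
# The __hash__ removed from Composable above was actually broken: it called
# hash() with three arguments, and the builtin hash() takes exactly one, so
# any call would have raised TypeError. The reformatted __hash__ methods kept
# on CountermeasureIdent and MultIdent pack everything into a single tuple,
# which is the correct pattern. A minimal sketch, with Ident as a
# hypothetical stand-in class:

class Ident:
    def __init__(self, klass, *args, **kwargs):
        self.klass, self.args, self.kwargs = klass, list(args), kwargs

    def __hash__(self):
        # One tuple, one hash() call; kwargs keys/values taken in dict order.
        return hash(
            (
                self.klass,
                tuple(self.args),
                tuple(self.kwargs.keys()),
                tuple(self.kwargs.values()),
            )
        )
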
diff --git a/analysis/scalarmults/epare/error_model.py b/analysis/scalarmults/epare/error_model.py
index a7f5edd..2445dda 100644
--- a/analysis/scalarmults/epare/error_model.py
+++ b/analysis/scalarmults/epare/error_model.py
@@ -14,7 +14,7 @@ def check_divides(k, l, q):
def check_half_add(k, l, q):
- return (q % 2 == 0) and ((k-l) % (q//2)) == 0
+ return (q % 2 == 0) and ((k - l) % (q // 2)) == 0
def check_affine(k, q):
@@ -24,11 +24,13 @@ def check_affine(k, q):
def check_any(*checks, q=None):
"""Merge multiple checks together. The returned check function no longer takes the `q` parameter."""
+
def check_func(k, l):
for check in checks:
if check(k, l, q):
return True
return False
+
return check_func
@@ -36,13 +38,12 @@ def check_any(*checks, q=None):
checks_add = {
"equal_multiples": check_equal_multiples,
"divides": check_divides,
- "half_add": check_half_add
+ "half_add": check_half_add,
}
# This check can be applied to conversion to affine.
-checks_affine = {
- "affine": check_affine
-}
+checks_affine = {"affine": check_affine}
+
@dataclass(frozen=True)
@total_ordering
@@ -61,16 +62,22 @@ class ErrorModel:
to affine form. If it does, it means that additional checks on all outputs of the precomputation are done as
they have to be "convertible" to affine form.
"""
+
checks: set[str]
check_condition: Union[Literal["all"], Literal["necessary"]]
precomp_to_affine: bool
- def __init__(self, checks: set[str], check_condition: Union[Literal["all"], Literal["necessary"]], precomp_to_affine: bool):
+ def __init__(
+ self,
+ checks: set[str],
+ check_condition: Union[Literal["all"], Literal["necessary"]],
+ precomp_to_affine: bool,
+ ):
for check in checks:
if check not in checks_add:
raise ValueError(f"Unknown check: {check}")
checks = set(checks)
- checks.add("affine") # always done in our model
+ checks.add("affine") # always done in our model
object.__setattr__(self, "checks", checks)
if check_condition not in ("all", "necessary"):
raise ValueError("Wrong check_condition")
@@ -81,7 +88,13 @@ class ErrorModel:
"""Get the add formula check function for the given q."""
if self.checks == {"affine"}:
return lambda k, l: False
- return check_any(*map(lambda name: checks_add[name], filter(lambda check: check in checks_add, self.checks)), q=q)
+ return check_any(
+ *map(
+ lambda name: checks_add[name],
+ filter(lambda check: check in checks_add, self.checks),
+ ),
+ q=q,
+ )
def check_affine(self, q):
"""Get the to-affine check function for the given q."""
@@ -106,4 +119,6 @@ class ErrorModel:
return f"({','.join(cs)}+{self.check_condition}{precomp})"
def __hash__(self):
-        return hash((tuple(sorted(self.checks)), self.check_condition, self.precomp_to_affine))
\ No newline at end of file
+ return hash(
+ (tuple(sorted(self.checks)), self.check_condition, self.precomp_to_affine)
+ )
diff --git a/analysis/scalarmults/epare/mult_results.py b/analysis/scalarmults/epare/mult_results.py
index 5424b01..dd93deb 100644
--- a/analysis/scalarmults/epare/mult_results.py
+++ b/analysis/scalarmults/epare/mult_results.py
@@ -15,6 +15,7 @@ class MultResults:
in a dict[Config, MultResults]. The `samples` describe how many computations
are contained and must correspond to the length of the `multiplications` list.
"""
+
multiplications: list[tuple[MultipleContext, MultipleContext, Point]]
samples: int
duration: Optional[float] = None
@@ -33,8 +34,10 @@ class MultResults:
return self.multiplications[i]
def __str__(self):
- duration = timedelta(seconds=int(self.duration)) if self.duration is not None else ""
+ duration = (
+ timedelta(seconds=int(self.duration)) if self.duration is not None else ""
+ )
return f"MultResults({self.samples},{duration})"
def __repr__(self):
-        return str(self)
\ No newline at end of file
+ return str(self)
diff --git a/analysis/scalarmults/epare/prob_map.py b/analysis/scalarmults/epare/prob_map.py
index eb96dda..f0df3ed 100644
--- a/analysis/scalarmults/epare/prob_map.py
+++ b/analysis/scalarmults/epare/prob_map.py
@@ -16,6 +16,7 @@ class ProbMap:
same divisor set and updates the probabilities to a weighted average of the two probability maps
(the weight is the number of samples).
"""
+
probs: dict[int, float]
divisors_hash: bytes
samples: int
@@ -54,12 +55,16 @@ class ProbMap:
def merge(self, other: "ProbMap") -> None:
"""Merge the `other` probability map into this one (must share the divisor set)."""
if self.divisors_hash != other.divisors_hash:
- raise ValueError("Merging can only work on probmaps created for same divisors.")
+ raise ValueError(
+ "Merging can only work on probmaps created for same divisors."
+ )
new_keys = set(self.keys()).union(other.keys())
result = {}
for key in new_keys:
sk = self[key]
ok = other[key]
- result[key] = (sk * self.samples + ok * other.samples) / (self.samples + other.samples)
+ result[key] = (sk * self.samples + ok * other.samples) / (
+ self.samples + other.samples
+ )
self.probs = result
-        self.samples += other.samples
\ No newline at end of file
+ self.samples += other.samples
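
# A standalone sketch of the weighted-average merge in ProbMap.merge above.
# Treating missing keys as probability 0.0 is an assumption (ProbMap drops
# zero probabilities, and its __getitem__ is not shown in this hunk):

def merge_probs(a, n_a, b, n_b):
    keys = set(a) | set(b)
    return {
        k: (a.get(k, 0.0) * n_a + b.get(k, 0.0) * n_b) / (n_a + n_b)
        for k in keys
    }

# 100 samples at p=0.2 merged with 300 samples at p=0.4 gives p=0.35:
print(merge_probs({7: 0.2}, 100, {7: 0.4}, 300))  # {7: 0.35}
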
diff --git a/analysis/scalarmults/epare/simulate.py b/analysis/scalarmults/epare/simulate.py
index 9b78436..7fa094e 100644
--- a/analysis/scalarmults/epare/simulate.py
+++ b/analysis/scalarmults/epare/simulate.py
@@ -1,20 +1,26 @@
+import hashlib
import random
import pickle
+
from functools import partial
from .config import Config
from .mult_results import MultResults
+from .prob_map import ProbMap
from pyecsca.ec.params import DomainParameters
from pyecsca.ec.mod import mod
from pyecsca.sca.re.rpa import multiple_graph
+from pyecsca.sca.re.epa import graph_to_check_inputs, evaluate_checks
-def simulate_multiples(mult: Config,
- params: DomainParameters,
- bits: int,
- samples: int = 100,
- seed: bytes | None = None) -> MultResults:
+def simulate_multiples(
+ mult: Config,
+ params: DomainParameters,
+ bits: int,
+ samples: int = 100,
+ seed: bytes | None = None,
+) -> MultResults:
"""
Takes a Config, which specifies a scalar multiplier (with optional countermeasures)
and simulates `samples` scalar multiplications, while tracking which multiples of the
@@ -34,16 +40,22 @@ def simulate_multiples(mult: Config,
scalars = [one for _ in range(samples)]
for scalar in scalars:
- results.append(multiple_graph(scalar, params, mult.mult.klass, partial(mult.partial, rng=rng)))
+ results.append(
+ multiple_graph(
+ scalar, params, mult.mult.klass, partial(mult.partial, rng=rng)
+ )
+ )
return MultResults(results, samples)
-def simulate_multiples_direct(mult: Config,
- params: DomainParameters,
- bits: int,
- fname: str,
- samples: int = 100,
- seed: bytes | None = None) -> str:
+def simulate_multiples_direct(
+ mult: Config,
+ params: DomainParameters,
+ bits: int,
+ fname: str,
+ samples: int = 100,
+ seed: bytes | None = None,
+) -> str:
"""
Like the `simulate_multiples` function above, but stores the pickled output directly
into a file named `fname`.
@@ -54,11 +66,13 @@ def simulate_multiples_direct(mult: Config,
return fname
-def evaluate_multiples(mult: Config,
- res: MultResults,
- divisors: set[int],
- use_init: bool = True,
- use_multiply: bool = True):
+def evaluate_multiples(
+ mult: Config,
+ res: MultResults,
+ divisors: set[int],
+ use_init: bool = True,
+ use_multiply: bool = True,
+):
"""
Takes MultIdent and MultResults and a set of divisors (base point orders `q`) and
evaluates them using the error model from the MultIdent. Note that the MultIdent
@@ -66,16 +80,27 @@ def evaluate_multiples(mult: Config,
"""
errors = {divisor: 0 for divisor in divisors}
samples = len(res)
- divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()
+ divisors_hash = hashlib.blake2b(
+ str(sorted(divisors)).encode(), digest_size=8
+ ).digest()
for precomp_ctx, full_ctx, out in res:
- check_inputs = graph_to_check_inputs(precomp_ctx, full_ctx, out,
- check_condition=mult.error_model.check_condition,
- precomp_to_affine=mult.error_model.precomp_to_affine,
- use_init=use_init,
- use_multiply=use_multiply)
+ check_inputs = graph_to_check_inputs(
+ precomp_ctx,
+ full_ctx,
+ out,
+ check_condition=mult.error_model.check_condition,
+ precomp_to_affine=mult.error_model.precomp_to_affine,
+ use_init=use_init,
+ use_multiply=use_multiply,
+ )
for q in divisors:
- error = evaluate_checks(check_funcs={"add": mult.error_model.check_add(q), "affine": mult.error_model.check_affine(q)},
- check_inputs=check_inputs)
+ error = evaluate_checks(
+ check_funcs={
+ "add": mult.error_model.check_add(q),
+ "affine": mult.error_model.check_affine(q),
+ },
+ check_inputs=check_inputs,
+ )
errors[q] += error
# Make probmaps smaller. Do not store zero probabilities.
probs = {}
@@ -85,12 +110,14 @@ def evaluate_multiples(mult: Config,
return ProbMap(probs, divisors_hash, samples)
-def evaluate_multiples_direct(mult: Config,
- fname: str,
- offset: int,
- divisors: set[int],
- use_init: bool = True,
- use_multiply: bool = True):
+def evaluate_multiples_direct(
+ mult: Config,
+ fname: str,
+ offset: int,
+ divisors: set[int],
+ use_init: bool = True,
+ use_multiply: bool = True,
+):
"""
Like `evaluate_multiples`, but instead reads the MultResults from a file named `fname`
at an `offset`. Still returns the ProbMap, which is significantly smaller and easier
@@ -99,4 +126,4 @@ def evaluate_multiples_direct(mult: Config,
with open(fname, "rb") as f:
f.seek(offset)
_, res = pickle.load(f)
-    return evaluate_multiples(mult, res, divisors, use_init, use_multiply)
\ No newline at end of file
+ return evaluate_multiples(mult, res, divisors, use_init, use_multiply)
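
# The divisors_hash computed in evaluate_multiples above is a stable 8-byte
# fingerprint of the divisor set; ProbMap.merge uses it to refuse merging
# maps built from different divisors. A standalone sketch using the same
# construction as the diff (divisors_fingerprint is a hypothetical name):

import hashlib

def divisors_fingerprint(divisors: set[int]) -> bytes:
    # sorted() makes the digest independent of set iteration order.
    return hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()

assert divisors_fingerprint({3, 5, 7}) == divisors_fingerprint({7, 5, 3})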