aboutsummaryrefslogtreecommitdiff
path: root/analysis/scalarmults/epare/simulate.py
diff options
context:
space:
mode:
Diffstat (limited to 'analysis/scalarmults/epare/simulate.py')
-rw-r--r--analysis/scalarmults/epare/simulate.py102
1 files changed, 102 insertions, 0 deletions
diff --git a/analysis/scalarmults/epare/simulate.py b/analysis/scalarmults/epare/simulate.py
new file mode 100644
index 0000000..9b78436
--- /dev/null
+++ b/analysis/scalarmults/epare/simulate.py
@@ -0,0 +1,102 @@
import hashlib
import pickle
import random
from functools import partial

from pyecsca.ec.mod import mod
from pyecsca.ec.params import DomainParameters
from pyecsca.sca.re.rpa import evaluate_checks, graph_to_check_inputs, multiple_graph

from .config import Config
from .mult_results import MultResults
from .prob_map import ProbMap  # TODO(review): confirm module path — ProbMap was used but never imported
+
+
def simulate_multiples(mult: Config,
                       params: DomainParameters,
                       bits: int,
                       samples: int = 100,
                       seed: bytes | None = None) -> MultResults:
    """
    Simulate scalar multiplications while tracking the computed multiples.

    Takes a Config, which specifies a scalar multiplier (with optional countermeasures)
    and simulates `samples` scalar multiplications, while tracking which multiples of the
    symbolic input point get computed.

    :param mult: Multiplier configuration (scalar multiplier + optional countermeasure).
    :param params: Domain parameters to simulate over.
    :param bits: Bit-length of the simulated scalars.
    :param samples: Number of scalar multiplications to simulate.
    :param seed: Optional seed for reproducible scalar/randomness generation.
    :return: The collected MultResults.
    """
    results = []
    if seed is not None:
        random.seed(seed)
    # Randomness source handed to the multiplier/countermeasure: uniform Mod in [0, n).
    rng = lambda n: mod(random.randrange(n), n)

    # If no countermeasure is used, we have fully random scalars.
    # Otherwise, fix one per chunk (the countermeasure itself re-randomizes each run).
    # Upper bound is 2**bits - 1 so the scalar fits into `bits` bits: randint's upper
    # bound is inclusive, so the original 2**bits could yield a (bits+1)-bit scalar.
    if not mult.has_countermeasure:
        scalars = [random.randint(1, 2**bits - 1) for _ in range(samples)]
    else:
        one = random.randint(1, 2**bits - 1)
        scalars = [one for _ in range(samples)]

    for scalar in scalars:
        results.append(multiple_graph(scalar, params, mult.mult.klass, partial(mult.partial, rng=rng)))
    return MultResults(results, samples)
+
+
def simulate_multiples_direct(mult: Config,
                              params: DomainParameters,
                              bits: int,
                              fname: str,
                              samples: int = 100,
                              seed: bytes | None = None) -> str:
    """
    Simulate scalar multiplications and pickle the output straight to disk.

    Like the `simulate_multiples` function above, but stores the pickled output directly
    into a file named `fname`.

    :return: The name of the file the results were written to.
    """
    simulated = simulate_multiples(mult, params, bits, samples, seed)
    # Store the config alongside the results so the file is self-describing.
    with open(fname, "wb") as outfile:
        pickle.dump((mult, simulated), outfile)
    return fname
+
+
def evaluate_multiples(mult: Config,
                       res: MultResults,
                       divisors: set[int],
                       use_init: bool = True,
                       use_multiply: bool = True):
    """
    Evaluate simulated multiples against a set of divisors using the error model.

    Takes a Config and MultResults and a set of divisors (base point orders `q`) and
    evaluates them using the error model from the Config. Note that the Config
    must have an error model in this case. Returns the ProbMap.

    :param mult: Multiplier configuration; must carry an error model.
    :param res: Simulated multiplication results to evaluate.
    :param divisors: Base point orders `q` to evaluate errors for.
    :param use_init: Whether to include checks from the precomputation/init phase.
    :param use_multiply: Whether to include checks from the multiplication phase.
    :return: The ProbMap of non-zero error probabilities.
    """
    errors = {divisor: 0 for divisor in divisors}
    samples = len(res)
    # Short, order-independent fingerprint of the divisor set, so ProbMaps computed
    # over different divisor sets cannot be accidentally combined.
    divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()
    for precomp_ctx, full_ctx, out in res:
        # Build the check inputs once per simulated multiplication;
        # they are shared by the evaluation for every divisor below.
        check_inputs = graph_to_check_inputs(precomp_ctx, full_ctx, out,
                                             check_condition=mult.error_model.check_condition,
                                             precomp_to_affine=mult.error_model.precomp_to_affine,
                                             use_init=use_init,
                                             use_multiply=use_multiply)
        for q in divisors:
            error = evaluate_checks(check_funcs={"add": mult.error_model.check_add(q), "affine": mult.error_model.check_affine(q)},
                                    check_inputs=check_inputs)
            errors[q] += error
    # Make probmaps smaller. Do not store zero probabilities.
    probs = {q: error / samples for q, error in errors.items() if error != 0}
    return ProbMap(probs, divisors_hash, samples)
+
+
def evaluate_multiples_direct(mult: Config,
                              fname: str,
                              offset: int,
                              divisors: set[int],
                              use_init: bool = True,
                              use_multiply: bool = True):
    """
    Evaluate pickled MultResults read from a file.

    Like `evaluate_multiples`, but instead reads the MultResults from a file named `fname`
    at an `offset`. Still returns the ProbMap, which is significantly smaller and easier
    to pickle than the MultResults.
    """
    # The file holds (Config, MultResults) tuples; the stored Config is discarded
    # in favour of the one passed in.
    with open(fname, "rb") as infile:
        infile.seek(offset)
        _, loaded = pickle.load(infile)
    return evaluate_multiples(mult, loaded, divisors, use_init, use_multiply)