| author | J08nY | 2025-08-21 17:00:37 +0200 |
|---|---|---|
| committer | J08nY | 2025-08-21 17:00:37 +0200 |
| commit | 2f5d0ec98fdf4ea22f979f2b5d5671006da557bc (patch) | |
| tree | db8bffc919644b6d834eea92d36a3a783019971f | |
| parent | 817015cfab4966725f2a0b19df0ca814c212a80c (diff) | |
| download | ECTester-2f5d0ec98fdf4ea22f979f2b5d5671006da557bc.tar.gz ECTester-2f5d0ec98fdf4ea22f979f2b5d5671006da557bc.tar.zst ECTester-2f5d0ec98fdf4ea22f979f2b5d5671006da557bc.zip | |
Fix new multiple_graph API.
It now returns two contexts (precomp and full).
| -rw-r--r-- | analysis/scalarmults/common.py | 7 |
| -rw-r--r-- | analysis/scalarmults/simulate.ipynb | 55 |
| -rw-r--r-- | analysis/scalarmults/simulate.py | 120 |
3 files changed, 36 insertions, 146 deletions
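The core of the change, as a minimal sketch: `multiple_graph` now returns a precomputation context and a full context instead of a single context. The positional call shape and the unpacked triple are taken from the diff below; the secp128r1 parameters, the 2007 Bernstein-Lange formulas, and the LTR multiplier are illustrative assumptions, not part of this commit.

```python
# Minimal sketch of the API change. The call multiple_graph(scalar, params,
# klass, partial) and the returned triple follow the notebook code in the
# diff below; the curve, formulas, and multiplier choice are assumptions.
from functools import partial

from pyecsca.ec.params import get_params
from pyecsca.ec.mult import LTRMultiplier
from pyecsca.sca.re.rpa import multiple_graph

params = get_params("secg", "secp128r1", "projective")
coords = params.curve.coordinate_model
add = coords.formulas["add-2007-bl"]
dbl = coords.formulas["dbl-2007-bl"]

# Before this commit:
#   ctx, out = multiple_graph(scalar, params, klass, partial, use_init, use_multiply)
# After: two contexts (precomputation-only and full computation), with the
# use_init/use_multiply filtering deferred to evaluation time.
precomp_ctx, full_ctx, out = multiple_graph(0xdeadbeef, params,
                                            LTRMultiplier,
                                            partial(LTRMultiplier, add, dbl))
```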
```diff
diff --git a/analysis/scalarmults/common.py b/analysis/scalarmults/common.py
index 7a2563d..b307278 100644
--- a/analysis/scalarmults/common.py
+++ b/analysis/scalarmults/common.py
@@ -235,11 +235,12 @@ class MultIdent:
 class MultResults:
     """
     A MultResults instance represents many simulated scalar multiplciation computations, which were tracked
-    using a `MultipleContext`. Generally, these would be for one MultIdent only, but that should be handled
-    separately, for example in a dict[MultIdent, MultResults]. The `samples` describe how many computations
+    using a `MultipleContext` (i.e. the outputs of the :func:`pyecsca.sca.re.rpa.multiple_graph` function).
+    Generally, these would be for one MultIdent only, but that should be handled separately, for example
+    in a dict[MultIdent, MultResults]. The `samples` describe how many computations
     are contained and must correspond to the length of the `multiplications` list.
     """

-    multiplications: list[tuple[MultipleContext, Point]]
+    multiplications: list[tuple[MultipleContext, MultipleContext, Point]]
     samples: int
     duration: Optional[float] = None
diff --git a/analysis/scalarmults/simulate.ipynb b/analysis/scalarmults/simulate.ipynb
index 9177390..47049f2 100644
--- a/analysis/scalarmults/simulate.ipynb
+++ b/analysis/scalarmults/simulate.ipynb
@@ -5,7 +5,7 @@
    "id": "805d746e-610b-4d40-80d2-a8080a993f96",
    "metadata": {},
    "source": [
-    "# Simulating EPA-RE using points of low-order\n",
+    "## Simulating EPA-RE using points of low-order\n",
     "\n",
     "As visible in the [`formulas`](formulas.ipynb) notebook, most addition formulas have exceptional cases.\n",
     "We can use trigger these exceptions by supplying points of low order to the scalar multiplier, which\n",
@@ -156,8 +156,6 @@
     "                       params: DomainParameters,\n",
     "                       bits: int,\n",
     "                       samples: int = 100,\n",
-    "                       use_init: bool = True,\n",
-    "                       use_multiply: bool = True,\n",
     "                       seed: bytes | None = None) -> MultResults:\n",
     "    \"\"\"\n",
     "    Takes a MultIdent, which specifies a scalar multiplier (with an optional countermeasure)\n",
@@ -177,7 +175,7 @@
     "        scalars = [one for _ in range(samples)]\n",
     "\n",
     "    for scalar in scalars:\n",
-    "        results.append(multiple_graph(scalar, params, mult.klass, mult.partial, use_init, use_multiply))\n",
+    "        results.append(multiple_graph(scalar, params, mult.klass, mult.partial))\n",
     "    return MultResults(results, samples)"
    ]
   },
@@ -193,8 +191,6 @@
     "                              bits: int,\n",
     "                              fname: str,\n",
     "                              samples: int = 100,\n",
-    "                              use_init: bool = True,\n",
-    "                              use_multiply: bool = True,\n",
     "                              seed: bytes | None = None) -> str:\n",
     "    \"\"\"\n",
     "    Like the `simulate_multiples` function above, but stores the pickled output directly\n",
@@ -213,7 +209,7 @@
     "        scalars = [one for _ in range(samples)]\n",
     "\n",
     "    for scalar in scalars:\n",
-    "        results.append(multiple_graph(scalar, params, mult.klass, mult.partial, use_init, use_multiply))\n",
+    "        results.append(multiple_graph(scalar, params, mult.klass, mult.partial))\n",
     "    result = MultResults(results, samples)\n",
     "    with open(fname, \"wb\") as f:\n",
     "        pickle.dump((mult, result), f)\n",
@@ -227,7 +223,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def evaluate_multiples(mult: MultIdent, res: MultResults, divisors: set[int]):\n",
+    "def evaluate_multiples(mult: MultIdent,\n",
+    "                       res: MultResults,\n",
+    "                       divisors: set[int],\n",
+    "                       use_init: bool = True,\n",
+    "                       use_multiply: bool = True):\n",
     "    \"\"\"\n",
     "    Takes MultIdent and MultResults and a set of divisors (base point orders `q`) and\n",
     "    evaluates them using the error model from the MultIdent. Note that the MultIdent\n",
@@ -236,10 +236,12 @@
     "    errors = {divisor: 0 for divisor in divisors}\n",
     "    samples = len(res)\n",
     "    divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()\n",
-    "    for ctx, out in res:\n",
-    "        check_inputs = graph_to_check_inputs(ctx, out,\n",
+    "    for precomp_ctx, full_ctx, out in res:\n",
+    "        check_inputs = graph_to_check_inputs(precomp_ctx, full_ctx, out,\n",
     "                                             check_condition=mult.error_model.check_condition,\n",
-    "                                             precomp_to_affine=mult.error_model.precomp_to_affine)\n",
+    "                                             precomp_to_affine=mult.error_model.precomp_to_affine,\n",
+    "                                             use_init=use_init,\n",
+    "                                             use_multiply=use_multiply)\n",
     "        for q in divisors:\n",
     "            error = evaluate_checks(check_funcs={\"add\": mult.error_model.check_add(q), \"affine\": mult.error_model.check_affine(q)},\n",
     "                                    check_inputs=check_inputs)\n",
@@ -259,7 +261,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def evaluate_multiples_direct(mult: MultIdent, fname: str, offset: int, divisors: set[int]):\n",
+    "def evaluate_multiples_direct(mult: MultIdent,\n",
+    "                              fname: str,\n",
+    "                              offset: int,\n",
+    "                              divisors: set[int],\n",
+    "                              use_init: bool = True,\n",
+    "                              use_multiply: bool = True):\n",
     "    \"\"\"\n",
     "    Like `evaluate_multiples`, but instead reads the MultResults from a file named `fname`\n",
     "    at an `offset`. Still returns the ProbMap, which is significantly smaller and easier\n",
@@ -271,10 +278,12 @@
     "    errors = {divisor: 0 for divisor in divisors}\n",
     "    samples = len(res)\n",
     "    divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()\n",
-    "    for ctx, out in res:\n",
-    "        check_inputs = graph_to_check_inputs(ctx, out,\n",
+    "    for precomp_ctx, full_ctx, out in res:\n",
+    "        check_inputs = graph_to_check_inputs(precomp_ctx, full_ctx, out,\n",
     "                                             check_condition=mult.error_model.check_condition,\n",
-    "                                             precomp_to_affine=mult.error_model.precomp_to_affine)\n",
+    "                                             precomp_to_affine=mult.error_model.precomp_to_affine,\n",
+    "                                             use_init=use_init,\n",
+    "                                             use_multiply=use_multiply)\n",
     "        for q in divisors:\n",
     "            error = evaluate_checks(check_funcs={\"add\": mult.error_model.check_add(q), \"affine\": mult.error_model.check_affine(q)},\n",
     "                                    check_inputs=check_inputs)\n",
@@ -309,8 +318,8 @@
     "    for i, mult in enumerate(all_mults_with_ctr):\n",
     "        pool.submit_task(mult,\n",
     "                         simulate_multiples_direct,\n",
-    "                         mult, params, bits, tmp_path / f\"{i}.pickle\", samples, seed=chunk_id, use_init=use_init, use_multiply=use_multiply)\n",
-    "    with open(f\"multiples_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk{chunk_id}.pickle\",\"wb\") as h:\n",
+    "                         mult, params, bits, tmp_path / f\"{i}.pickle\", samples, seed=chunk_id)\n",
+    "    with open(f\"multiples_{bits}_chunk{chunk_id}.pickle\",\"wb\") as h:\n",
     "        for mult, future in tqdm(pool.as_completed(), desc=\"Computing multiple graphs.\", total=len(pool.tasks)):\n",
     "            print(f\"Got {mult}.\")\n",
     "            if error := future.exception():\n",
@@ -345,11 +354,11 @@
    "outputs": [],
    "source": [
     "with TaskExecutor(max_workers=num_workers, initializer=silence) as pool:\n",
-    "    for in_fname in tqdm(glob.glob(f\"multiples_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
+    "    for in_fname in tqdm(glob.glob(f\"multiples_{bits}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
     "        \n",
-    "        match = re.match(\"multiples_(?P<bits>[0-9]+)_(?P<init>(?:no)?init)_(?P<mult>(?:no)?mult)_chunk(?P<id>[0-9a-f]+).pickle\", in_fname)\n",
+    "        match = re.match(\"multiples_(?P<bits>[0-9]+)_chunk(?P<id>[0-9a-f]+).pickle\", in_fname)\n",
     "        chunk_id = match.group(\"id\")\n",
-    "        out_fname = f\"probs_{bits}_{use_init}_{use_multiply}_chunk{chunk_id}.pickle\"\n",
+    "        out_fname = f\"probs_{bits}_{'i' if use_init else 'ni'}_{'m' if use_multiply else 'nm'}_chunk{chunk_id}.pickle\"\n",
     "\n",
     "        in_file = Path(in_fname)\n",
     "        out_file = Path(out_fname)\n",
@@ -393,7 +402,7 @@
     "            # Pass the file name and offset to speed up computation start.\n",
     "            pool.submit_task(full,\n",
     "                             evaluate_multiples_direct,\n",
-    "                             full, in_fname, start, divisor_map[\"all\"])\n",
+    "                             full, in_fname, start, divisor_map[\"all\"], use_init, use_multiply)\n",
     "    gc.collect()\n",
     "    for full, future in pool.as_completed(wait=False):\n",
     "        processing_bar.update(1)\n",
@@ -436,9 +445,9 @@
    "outputs": [],
    "source": [
     "probmaps = {}\n",
-    "for in_fname in tqdm(glob.glob(f\"probs_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
+    "for in_fname in tqdm(glob.glob(f\"probs_{bits}_{'i' if use_init else 'ni'}_{'m' if use_multiply else 'nm'}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
     "    \n",
-    "    match = re.match(\"probs_(?P<bits>[0-9]+)_(?P<init>(?:no)?init)_(?P<mult>(?:no)?mult)_chunk(?P<id>[0-9a-f]+).pickle\", in_fname)\n",
+    "    match = re.match(\"probs_(?P<bits>[0-9]+)_(?P<init>(?:n)?i)_(?P<mult>(?:n)?m)_chunk(?P<id>[0-9a-f]+).pickle\", in_fname)\n",
     "    chunk_id = match.group(\"id\")\n",
     "    \n",
     "    with open(in_fname, \"rb\") as f:\n",
diff --git a/analysis/scalarmults/simulate.py b/analysis/scalarmults/simulate.py
deleted file mode 100644
index dd9c68c..0000000
--- a/analysis/scalarmults/simulate.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # Simulating EPA-RE using points of low-order
-
-
-import pickle
-import itertools
-import glob
-import random
-import sys
-import time
-import os
-
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-
-from collections import Counter
-
-from pathlib import Path
-from random import randint, randbytes, shuffle
-from typing import Type, Any, Tuple
-
-from tqdm.auto import tqdm, trange
-
-from pyecsca.ec.params import DomainParameters, get_params
-from pyecsca.ec.mult import *
-from pyecsca.sca.re.rpa import multiples_computed
-from pyecsca.misc.utils import TaskExecutor
-
-from common import *
-
-
-def get_general_multiples(bits: int, samples: int = 1000) -> MultResults:
-    from random import randint
-    results = []
-    for _ in range(samples):
-        big_scalar = randint(1, 2**bits)
-        results.append({big_scalar})
-    return MultResults(results, samples)
-
-
-def get_general_n_multiples(bits: int, n: int, samples: int = 1000) -> MultResults:
-    from random import randint
-    results = []
-    for _ in range(samples):
-        smult = set()
-        for i in range(n):
-            b = randint(1,256)
-            smult.add(randint(2**b,2**(b+1)))
-        results.append(smult)
-    return MultResults(results, samples)
-
-
-def get_small_scalar_multiples(mult: MultIdent,
-                               params: DomainParameters,
-                               bits: int,
-                               samples: int = 1000,
-                               use_init: bool = True,
-                               use_multiply: bool = True,
-                               seed: bytes | None = None,
-                               kind: str = "precomp+necessary") -> Tuple[MultResults, float]:
-
-    duration = -time.perf_counter()
-    results = []
-    if seed is not None:
-        random.seed(seed)
-
-    # If no countermeasure is used, we have fully random scalars.
-    # Otherwise, fix one per chunk.
-    if mult.countermeasure is None:
-        scalars = [random.randint(1, 2**bits) for _ in range(samples)]
-    else:
-        one = random.randint(1, 2**bits)
-        scalars = [one for _ in range(samples)]
-
-    for scalar in scalars:
-        # Use a list for less memory usage.
-        results.append(list(multiples_computed(scalar, params, mult.klass, mult.partial, use_init, use_multiply, kind=kind)))
-    duration += time.perf_counter()
-    return MultResults(results, samples, duration=duration, kind=kind)
-
-
-if __name__ == "__main__":
-    category = "secg"
-    curve = "secp256r1"
-    params = get_params(category, curve, "projective")
-    num_workers = int(sys.argv[1]) if len(sys.argv) > 1 else 32
-    bits = params.order.bit_length()
-    samples = int(sys.argv[2]) if len(sys.argv) > 2 else 100
-    kind = sys.argv[3] if len(sys.argv) > 3 else "precomp+necessary"
-    use_init = (sys.argv[4].lower() == "true") if len(sys.argv) > 4 else True
-    use_multiply = (sys.argv[5].lower() == "true") if len(sys.argv) > 5 else True
-    selected_mults = all_mults
-    shuffle(selected_mults)
-
-    if (scratch := os.getenv("SCRATCHDIR")) is not None:
-        outdir = Path(scratch)
-    else:
-        outdir = Path.cwd()
-
-    print(f"Running on {num_workers} cores, doing {samples} samples.")
-
-    chunk_id = randbytes(6).hex()
-    with TaskExecutor(max_workers=num_workers) as pool:
-        for mult in selected_mults:
-            for countermeasure in (None, "gsr", "additive", "multiplicative", "euclidean", "bt"):
-                mwc = mult.with_countermeasure(countermeasure)
-                pool.submit_task(mwc,
-                                 get_small_scalar_multiples,
-                                 mwc, params, bits, samples, use_init=use_init, use_multiply=use_multiply, seed=chunk_id, kind=kind)
-        for mult, future in tqdm(pool.as_completed(), desc="Computing small scalar distributions.", total=len(pool.tasks), smoothing=0):
-            if error := future.exception():
-                print("Error", mult, error)
-                raise error
-            res = future.result()
-            print(f"Got {mult} in {res.duration}.")
-            with (outdir / f"multiples_{bits}_{kind}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk{chunk_id}.pickle").open("ab") as f:
-                pickle.dump((mult, res), f)
```
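Since the `use_init`/`use_multiply` filtering now happens in `evaluate_multiples` rather than during simulation, a single simulated chunk can be scored under all four flag combinations without re-running `multiple_graph`. A hypothetical driver, reusing the notebook's `evaluate_multiples`, `mult`, `res`, and `divisor_map`; only the loop below is new:

```python
# Hypothetical driver: score one simulated MultResults under all four
# init/multiply settings. evaluate_multiples and its arguments are the
# notebook's own objects from the diff above.
from itertools import product

probmaps_by_flags = {}
for use_init, use_multiply in product((True, False), repeat=2):
    probmaps_by_flags[(use_init, use_multiply)] = evaluate_multiples(
        mult, res, divisor_map["all"],
        use_init=use_init, use_multiply=use_multiply)
```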
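The on-disk naming changes accordingly: simulated chunks drop the init/mult flags from their file names, while probability chunks encode them compactly as `i`/`ni` and `m`/`nm`. A quick check using the exact regexes from the diff (the sample file names are made up):

```python
# Match the renamed pickle files with the patterns from the notebook diff.
import re

assert re.match(r"multiples_(?P<bits>[0-9]+)_chunk(?P<id>[0-9a-f]+).pickle",
                "multiples_256_chunk0a1b2c3d4e5f.pickle")

m = re.match(r"probs_(?P<bits>[0-9]+)_(?P<init>(?:n)?i)_(?P<mult>(?:n)?m)_chunk(?P<id>[0-9a-f]+).pickle",
             "probs_256_i_nm_chunk0a1b2c3d4e5f.pickle")
print(m.group("bits"), m.group("init"), m.group("mult"), m.group("id"))
# 256 i nm 0a1b2c3d4e5f
```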