author     J08nY    2025-03-12 15:49:13 +0100
committer  J08nY    2025-04-16 12:25:06 +0200
commit     252d9dc0531a4e0828bd94c605072e93b95767e0 (patch)
tree       33425878785d9a75d9b47de0f5aae8a91167a043
parent     558ed53bce450b0be0c3bd5b7721e6016da957a4 (diff)
download   ECTester-252d9dc0531a4e0828bd94c605072e93b95767e0.tar.gz
           ECTester-252d9dc0531a4e0828bd94c605072e93b95767e0.tar.zst
           ECTester-252d9dc0531a4e0828bd94c605072e93b95767e0.zip
-rw-r--r--  epare/collect_leia.ipynb   356
-rw-r--r--  epare/common.py            132
-rw-r--r--  epare/simulate.ipynb       165
-rw-r--r--  epare/visualize.ipynb      257
4 files changed, 693 insertions(+), 217 deletions(-)
diff --git a/epare/collect_leia.ipynb b/epare/collect_leia.ipynb
new file mode 100644
index 0000000..e54e643
--- /dev/null
+++ b/epare/collect_leia.ipynb
@@ -0,0 +1,356 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "eab24cb8-af40-4cc1-be5f-f120fe9932ee",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pyecsca.sca.target.ectester import ECTesterTargetLEIA, KeypairEnum, ParameterEnum, CurveEnum, KeyEnum, KeyClassEnum, KeyBuildEnum, KeyAgreementEnum, SignatureEnum, TransformationEnum\n",
+ "from pyecsca.ec.params import load_params_ectester\n",
+ "from pyecsca.sca.scope.picoscope_sdk import PS6000Scope\n",
+ "from pyecsca.sca.trace import Trace\n",
+ "from pyecsca.sca.trace.plot import plot_trace\n",
+ "from pyecsca.sca.scope import SampleType\n",
+ "\n",
+ "import numpy as np\n",
+ "from time import sleep\n",
+ "from smartleia import LEIA, TriggerPoints\n",
+ "\n",
+ "import holoviews as hv\n",
+ "\n",
+ "hv.extension(\"bokeh\")\n",
+ "%opts RGB [height=600, responsive=True]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "865953a5-35ad-473e-a57f-f26368145987",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sl = LEIA()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "885fba43-7546-48ba-bff4-b6314fd642e6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester = ECTesterTargetLEIA(sl)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8c33b783-7a7e-45f4-9e91-d3df54e1abe0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scope = PS6000Scope()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "729688c3-d448-4d1a-80c4-666680fcca35",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scope.open()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "23037143-e39c-4dd6-bf65-9cdcb0d7b266",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(scope.get_variant())\n",
+ "actual_freq, n_samples = scope.setup_frequency(frequency=250_000_000, pretrig=0, posttrig=150_000_000)\n",
+ "scope.setup_channel(channel=\"A\", coupling=\"DC\", range=1, offset=0, enable=True)\n",
+ "scope.setup_channel(channel=\"B\", coupling=\"DC\", range=0.1, offset=-0.4, enable=True)\n",
+ "scope.setup_trigger(channel=\"A\", threshold=0.2, direction=\"rising\", delay=0, timeout=5000, enable=True)\n",
+ "scope.setup_capture(channel=\"B\", enable=True)\n",
+ "print(actual_freq, n_samples)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "07369045-7eee-4e85-ac4e-37e5396fd880",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.connect()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "89ed5db7-fbbb-4b32-89a5-6486204feed6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.atr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a16af658-34d5-455e-a609-3075ede7dad7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.select_applet()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ee3ec16b-d9ac-4cd7-8ad5-4dfe0c0d5465",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.info()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "27c4a7f7-3029-46d9-a3d5-3f4c0b56e37e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.allocate(KeypairEnum.KEYPAIR_LOCAL,\n",
+ " KeyBuildEnum.BUILD_KEYPAIR,\n",
+ " 256,\n",
+ " KeyClassEnum.ALG_EC_FP)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f952cf17-88f2-47d7-88f3-346c6cbd0572",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.allocate_ka(KeyAgreementEnum.ALG_EC_SVDP_DH)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4d67e5a3-79d3-4160-89c9-07f1cd697f52",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "params = load_params_ectester(\"../common/src/main/resources/cz/crcs/ectester/data/secg/secp256r1.csv\", \"affine\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "94e9c2c5-d231-459a-a601-b14a123fce62",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.set(KeypairEnum.KEYPAIR_LOCAL,\n",
+ " CurveEnum.external,\n",
+ " ParameterEnum.DOMAIN_FP,\n",
+ " ECTesterTargetLEIA.encode_parameters(ParameterEnum.DOMAIN_FP, params))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8d36f397-15ca-4e4a-8a87-dee10fe4398e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sl.set_trigger_strategy(1, point_list=[TriggerPoints.TRIG_PRE_SEND_APDU], delay=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "26be2fb8-607c-4bce-8968-ba57938f9a89",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scope.arm()\n",
+ "sleep(2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "83bfe8a5-00eb-4b17-8762-1ba0b07139e6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.generate(KeypairEnum.KEYPAIR_LOCAL)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "253a00c7-28d3-4648-b5d7-9c6876f37362",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scope.capture(10000)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2a0836a3-9d07-444c-a4eb-a661885ff3e8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "trace_gen = scope.retrieve(\"B\", SampleType.Volt)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f14f244d-fee6-44a9-8858-783d7ffb9f24",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plot_trace(trace_gen)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "08978417-6f39-4c40-a862-9ad43179dccc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scope.arm()\n",
+ "sleep(2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ad17301a-902f-48e9-936e-f553a1a8f006",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.ecdh_direct(KeypairEnum.KEYPAIR_LOCAL,\n",
+ " True,\n",
+ " TransformationEnum.NONE,\n",
+ " KeyAgreementEnum.ALG_EC_SVDP_DH,\n",
+ " bytes.fromhex(\"046b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c2964fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5\")) # pubkey as bytes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2282c28d-4745-43b2-b321-a34365f55753",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scope.capture(10000)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d0cdabb8-a4ee-4b19-a8ed-ae2e9f634a37",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "trace_ecdh = scope.retrieve(\"B\", SampleType.Volt)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c899b299-25f5-418b-baaa-fa8e4399daa9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plot_trace(trace_ecdh)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "195ed167-8925-44c9-b0fb-6e207f8fdf8a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.cleanup()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "264b4653-023c-4a39-8970-270c2f0d42c6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ectester.disconnect()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1d706782-7124-4879-8198-407e45f131ff",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scope.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "768a8fe0-27ec-4678-a66a-66e7680fd6c8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pyecsca.sca.trace.process import rolling_mean, recenter\n",
+ "from pyecsca.sca.trace.filter import filter_lowpass"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2c62eccf-eae1-48be-b562-222504e8e6cf",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/epare/common.py b/epare/common.py
index 3b10007..4689ce5 100644
--- a/epare/common.py
+++ b/epare/common.py
@@ -10,10 +10,12 @@ from dataclasses import dataclass
from functools import partial, cached_property
from importlib import import_module, invalidate_caches
from pathlib import Path
-from typing import Type, Any
+from typing import Type, Any, Optional
+from enum import Enum
from pyecsca.ec.params import DomainParameters, get_params
from pyecsca.ec.mult import *
+from pyecsca.ec.countermeasures import GroupScalarRandomization, AdditiveSplitting, MultiplicativeSplitting, EuclideanSplitting
spawn_context = multiprocessing.get_context("spawn")
@@ -42,24 +44,46 @@ class MultIdent:
klass: Type[ScalarMultiplier]
args: list[Any]
kwargs: dict[str, Any]
+ countermeasure: Optional[str] = None
def __init__(self, klass: Type[ScalarMultiplier], *args, **kwargs):
object.__setattr__(self, "klass", klass)
object.__setattr__(self, "args", args if args is not None else [])
+ if kwargs is not None and "countermeasure" in kwargs:
+ object.__setattr__(self, "countermeasure", kwargs["countermeasure"])
+ del kwargs["countermeasure"]
object.__setattr__(self, "kwargs", kwargs if kwargs is not None else {})
-
+
@cached_property
def partial(self):
- return partial(self.klass, *self.args, **self.kwargs)
+ func = partial(self.klass, *self.args, **self.kwargs)
+ if self.countermeasure is None:
+ return func
+ if self.countermeasure == "gsr":
+ return lambda *args, **kwargs: GroupScalarRandomization(func(*args, **kwargs))
+ elif self.countermeasure == "additive":
+ return lambda *args, **kwargs: AdditiveSplitting(func(*args, **kwargs))
+ elif self.countermeasure == "multiplicative":
+ return lambda *args, **kwargs: MultiplicativeSplitting(func(*args, **kwargs))
+ elif self.countermeasure == "euclidean":
+ return lambda *args, **kwargs: EuclideanSplitting(func(*args, **kwargs))
+
+ def with_countermeasure(self, countermeasure: str):
+ if countermeasure not in (None, "gsr", "additive", "multiplicative", "euclidean"):
+ raise ValueError(f"Unknown countermeasure: {countermeasure}")
+ return MultIdent(self.klass, *self.args, **self.kwargs, countermeasure=countermeasure)
def __str__(self):
- return f"{self.klass.__name__}_{self.args}_{self.kwargs}"
+ args = ("_" + ",".join(list(map(str, self.args)))) if self.args else ""
+ kwargs = ("_" + ",".join(f"{str(k)}:{v.name if isinstance(v, Enum) else str(v)}" for k,v in self.kwargs.items())) if self.kwargs else ""
+ countermeasure = f"+{self.countermeasure}" if self.countermeasure is not None else ""
+ return f"{self.klass.__name__}{args}{kwargs}{countermeasure}"
def __repr__(self):
return str(self)
def __hash__(self):
- return hash((self.klass, tuple(self.args), tuple(self.kwargs.keys()), tuple(self.kwargs.values())))
+ return hash((self.klass, self.countermeasure, tuple(self.args), tuple(self.kwargs.keys()), tuple(self.kwargs.values())))
@dataclass
@@ -85,3 +109,101 @@ class MultResults:
def __repr__(self):
return str(self)
+
+
+@dataclass
+class ProbMap:
+ probs: dict[int, float]
+ samples: int
+
+ def __len__(self):
+ return len(self.probs)
+
+ def __iter__(self):
+ yield from self.probs
+
+ def __getitem__(self, i):
+ return self.probs[i]
+
+ def keys(self):
+ return self.probs.keys()
+
+ def values(self):
+ return self.probs.values()
+
+ def items(self):
+ return self.probs.items()
+
+ def merge(self, other: "ProbMap"):
+ new_keys = set(self.keys()).union(other.keys())
+ result = {}
+ for key in new_keys:
+ if key in self and key in other:
+ result[key] = (self[key] * self.samples + other[key] * other.samples) / (self.samples + other.samples)
+ elif key in self:
+ result[key] = self[key]
+ elif key in other:
+ result[key] = other[key]
+ self.probs = result
+ self.samples += other.samples
+
+ def enrich(self, other: "ProbMap"):
+ if self.samples != other.samples:
+ raise ValueError("Enriching can only work on equal amount of samples (same run, different divisors)")
+ self.probs.update(other.probs)
+
+# All dbl-and-add multipliers from https://github.com/J08nY/pyecsca/blob/master/pyecsca/ec/mult
+
+window_mults = [
+ MultIdent(SlidingWindowMultiplier, width=3),
+ MultIdent(SlidingWindowMultiplier, width=4),
+ MultIdent(SlidingWindowMultiplier, width=5),
+ MultIdent(SlidingWindowMultiplier, width=6),
+ MultIdent(FixedWindowLTRMultiplier, m=2**4),
+ MultIdent(FixedWindowLTRMultiplier, m=2**5),
+ MultIdent(FixedWindowLTRMultiplier, m=2**6),
+ MultIdent(WindowBoothMultiplier, width=3),
+ MultIdent(WindowBoothMultiplier, width=4),
+ MultIdent(WindowBoothMultiplier, width=5),
+ MultIdent(WindowBoothMultiplier, width=6)
+]
+naf_mults = [
+ MultIdent(WindowNAFMultiplier, width=3),
+ MultIdent(WindowNAFMultiplier, width=4),
+ MultIdent(WindowNAFMultiplier, width=5),
+ MultIdent(WindowNAFMultiplier, width=6),
+ MultIdent(BinaryNAFMultiplier, direction=ProcessingDirection.LTR),
+ MultIdent(BinaryNAFMultiplier, direction=ProcessingDirection.RTL)
+]
+comb_mults = [
+ MultIdent(CombMultiplier, width=2),
+ MultIdent(CombMultiplier, width=3),
+ MultIdent(CombMultiplier, width=4),
+ MultIdent(CombMultiplier, width=5),
+ MultIdent(CombMultiplier, width=6),
+ MultIdent(BGMWMultiplier, width=2, direction=ProcessingDirection.LTR),
+ MultIdent(BGMWMultiplier, width=3, direction=ProcessingDirection.LTR),
+ MultIdent(BGMWMultiplier, width=4, direction=ProcessingDirection.LTR),
+ MultIdent(BGMWMultiplier, width=5, direction=ProcessingDirection.LTR),
+ MultIdent(BGMWMultiplier, width=6, direction=ProcessingDirection.LTR),
+ MultIdent(BGMWMultiplier, width=2, direction=ProcessingDirection.RTL),
+ MultIdent(BGMWMultiplier, width=3, direction=ProcessingDirection.RTL),
+ MultIdent(BGMWMultiplier, width=4, direction=ProcessingDirection.RTL),
+ MultIdent(BGMWMultiplier, width=5, direction=ProcessingDirection.RTL),
+ MultIdent(BGMWMultiplier, width=6, direction=ProcessingDirection.RTL)
+]
+binary_mults = [
+ MultIdent(LTRMultiplier, always=False),
+ MultIdent(LTRMultiplier, always=True),
+ MultIdent(RTLMultiplier, always=False),
+ MultIdent(RTLMultiplier, always=True),
+ MultIdent(CoronMultiplier)
+]
+other_mults = [
+ MultIdent(FullPrecompMultiplier, always=False),
+ MultIdent(FullPrecompMultiplier, always=True),
+ MultIdent(SimpleLadderMultiplier, complete=True),
+ MultIdent(SimpleLadderMultiplier, complete=False)
+]
+
+all_mults = window_mults + naf_mults + binary_mults + other_mults + comb_mults
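+
+# A minimal usage sketch of the pieces above (assumes only this module's imports);
+# it runs only when common.py is executed directly.
+if __name__ == "__main__":
+    base = MultIdent(LTRMultiplier, always=True)
+    protected = base.with_countermeasure("gsr")
+    # protected.partial wraps the constructed LTRMultiplier in GroupScalarRandomization.
+    print(base, protected)
+
+    a = ProbMap({3: 0.5, 5: 0.25}, samples=100)
+    b = ProbMap({3: 0.7, 7: 0.1}, samples=300)
+    a.merge(b)
+    # Merging is weighted by sample counts: a[3] == 0.65, a.samples == 400.
+    print(dict(a.items()), a.samples)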
diff --git a/epare/simulate.ipynb b/epare/simulate.ipynb
index 74cfe2b..4a0a07a 100644
--- a/epare/simulate.ipynb
+++ b/epare/simulate.ipynb
@@ -17,6 +17,7 @@
"source": [
"import pickle\n",
"import itertools\n",
+ "import glob\n",
"\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
@@ -25,7 +26,7 @@
"from collections import Counter\n",
"\n",
"from pathlib import Path\n",
- "from random import randint\n",
+ "from random import randint, randbytes\n",
"from typing import Type, Any\n",
"\n",
"from bs4 import BeautifulSoup\n",
@@ -33,13 +34,10 @@
"\n",
"from pyecsca.ec.params import DomainParameters, get_params\n",
"from pyecsca.ec.mult import *\n",
- "from pyecsca.sca.re.rpa import MultipleContext, rpa_distinguish, RPA, multiples_computed\n",
- "from pyecsca.ec.context import DefaultContext, local\n",
- "from pyecsca.ec.model import ShortWeierstrassModel\n",
- "from pyecsca.ec.coordinates import AffineCoordinateModel\n",
+ "from pyecsca.sca.re.rpa import multiples_computed\n",
"from pyecsca.misc.utils import TaskExecutor\n",
"\n",
- "from common import MultIdent, MultResults, enable_spawn, spawn_context"
+ "from common import *"
]
},
{
@@ -53,70 +51,6 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "5c0e42dc-8c61-4e2e-962c-6af48f6eb321",
- "metadata": {},
- "outputs": [],
- "source": [
- "# All dbl-and-add multipliers from https://github.com/J08nY/pyecsca/blob/master/pyecsca/ec/mult\n",
- "\n",
- "window_mults = [\n",
- " MultIdent(SlidingWindowMultiplier, width=3),\n",
- " MultIdent(SlidingWindowMultiplier, width=4),\n",
- " MultIdent(SlidingWindowMultiplier, width=5),\n",
- " MultIdent(SlidingWindowMultiplier, width=6),\n",
- " MultIdent(FixedWindowLTRMultiplier, m=2**4),\n",
- " MultIdent(FixedWindowLTRMultiplier, m=2**5),\n",
- " MultIdent(FixedWindowLTRMultiplier, m=2**6),\n",
- " MultIdent(WindowBoothMultiplier, width=3),\n",
- " MultIdent(WindowBoothMultiplier, width=4),\n",
- " MultIdent(WindowBoothMultiplier, width=5),\n",
- " MultIdent(WindowBoothMultiplier, width=6)\n",
- "]\n",
- "naf_mults = [\n",
- " MultIdent(WindowNAFMultiplier, width=3),\n",
- " MultIdent(WindowNAFMultiplier, width=4),\n",
- " MultIdent(WindowNAFMultiplier, width=5),\n",
- " MultIdent(WindowNAFMultiplier, width=6),\n",
- " MultIdent(BinaryNAFMultiplier, direction=ProcessingDirection.LTR),\n",
- " MultIdent(BinaryNAFMultiplier, direction=ProcessingDirection.RTL)\n",
- "]\n",
- "comb_mults = [\n",
- " MultIdent(CombMultiplier, width=2),\n",
- " MultIdent(CombMultiplier, width=3),\n",
- " MultIdent(CombMultiplier, width=4),\n",
- " MultIdent(CombMultiplier, width=5),\n",
- " MultIdent(CombMultiplier, width=6),\n",
- " MultIdent(BGMWMultiplier, width=2, direction=ProcessingDirection.LTR),\n",
- " MultIdent(BGMWMultiplier, width=3, direction=ProcessingDirection.LTR),\n",
- " MultIdent(BGMWMultiplier, width=4, direction=ProcessingDirection.LTR),\n",
- " MultIdent(BGMWMultiplier, width=5, direction=ProcessingDirection.LTR),\n",
- " MultIdent(BGMWMultiplier, width=6, direction=ProcessingDirection.LTR),\n",
- " MultIdent(BGMWMultiplier, width=2, direction=ProcessingDirection.RTL),\n",
- " MultIdent(BGMWMultiplier, width=3, direction=ProcessingDirection.RTL),\n",
- " MultIdent(BGMWMultiplier, width=4, direction=ProcessingDirection.RTL),\n",
- " MultIdent(BGMWMultiplier, width=5, direction=ProcessingDirection.RTL),\n",
- " MultIdent(BGMWMultiplier, width=6, direction=ProcessingDirection.RTL)\n",
- "]\n",
- "binary_mults = [\n",
- " MultIdent(LTRMultiplier, always=False),\n",
- " MultIdent(LTRMultiplier, always=True),\n",
- " MultIdent(RTLMultiplier, always=False),\n",
- " MultIdent(RTLMultiplier, always=True),\n",
- " MultIdent(CoronMultiplier)\n",
- "]\n",
- "other_mults = [\n",
- " MultIdent(FullPrecompMultiplier, always=False),\n",
- " MultIdent(FullPrecompMultiplier, always=True),\n",
- " MultIdent(SimpleLadderMultiplier, complete=True),\n",
- " MultIdent(SimpleLadderMultiplier, complete=False)\n",
- "]\n",
- "\n",
- "all_mults = window_mults + naf_mults + binary_mults + other_mults + comb_mults"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
"id": "a660e3ac-401b-47a0-92de-55afe63c420a",
"metadata": {},
"outputs": [],
@@ -152,13 +86,31 @@
" results.append(smult)\n",
" return MultResults(results, samples)\n",
"\n",
- "def get_small_scalar_multiples(mult: MultIdent, params: DomainParameters, bits: int, samples: int = 1000, use_init: bool = True, use_multiply: bool = True) -> MultResults:\n",
+ "def get_small_scalar_multiples(mult: MultIdent,\n",
+ " params: DomainParameters,\n",
+ " bits: int,\n",
+ " samples: int = 1000,\n",
+ " use_init: bool = True,\n",
+ " use_multiply: bool = True,\n",
+ " seed: bytes | None = None) -> MultResults:\n",
" from pyecsca.sca.re.rpa import multiples_computed\n",
- " from random import randint\n",
+ " import random\n",
+ " \n",
" results = []\n",
- " for _ in range(samples):\n",
- " big_scalar = randint(1, 2**bits)\n",
- " results.append(multiples_computed(big_scalar, params, mult.klass, mult.partial, use_init, use_multiply))\n",
+ " if seed is not None:\n",
+ " random.seed(seed)\n",
+ "\n",
+ " # If no countermeasure is used, we have fully random scalars.\n",
+ " # Otherwise, fix one per chunk.\n",
+ " if mult.countermeasure is None:\n",
+ " scalars = [random.randint(1, 2**bits) for _ in range(samples)]\n",
+ " else:\n",
+ " one = random.randint(1, 2**bits)\n",
+ " scalars = [one for _ in range(samples)]\n",
+ "\n",
+ " for scalar in scalars:\n",
+ " # Use a list for less memory usage.\n",
+ " results.append(list(multiples_computed(scalar, params, mult.klass, mult.partial, use_init, use_multiply)))\n",
" return MultResults(results, samples)"
]
},
@@ -173,16 +125,6 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "bb604b15-4ad6-43c0-9cfa-1b31611d73ce",
- "metadata": {},
- "outputs": [],
- "source": [
- "multiples_mults = {}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
"id": "4d5c7f10-618f-4612-b594-81d1607b0d1d",
"metadata": {},
"outputs": [],
@@ -202,7 +144,7 @@
"metadata": {},
"source": [
"## Run\n",
- "Run this cell as many times as you want. It will accumulate into multiples_mults."
+ "Run this cell as many times as you want. It will write chunks into files."
]
},
{
@@ -212,11 +154,15 @@
"metadata": {},
"outputs": [],
"source": [
+ "multiples_mults = {}\n",
+ "chunk_id = randbytes(4).hex()\n",
"with TaskExecutor(max_workers=num_workers, mp_context=spawn_context) as pool, enable_spawn(get_small_scalar_multiples) as target:\n",
" for mult in selected_mults:\n",
- " pool.submit_task(mult,\n",
- " target,\n",
- " mult, params, bits, samples)\n",
+ " for countermeasure in (None, \"gsr\", \"additive\", \"multiplicative\", \"euclidean\"):\n",
+ " mwc = mult.with_countermeasure(countermeasure)\n",
+ " pool.submit_task(mwc,\n",
+ " target,\n",
+ " mwc, params, bits, samples, seed=chunk_id)\n",
" for mult, future in tqdm(pool.as_completed(), desc=\"Computing small scalar distributions.\", total=len(pool.tasks)):\n",
" print(f\"Got {mult}.\")\n",
" if error := future.exception():\n",
@@ -227,46 +173,49 @@
" multiples_mults[mult] = res\n",
" else:\n",
" # Accumulate\n",
- " multiples_mults[mult].merge(res)"
+ " multiples_mults[mult].merge(res)\n",
+ " # Handle the enable_spawn trick that messes up class modules.\n",
+ " for k, v in multiples_mults.items():\n",
+ " v.__class__ = MultResults\n",
+ " v.__module__ = \"common\"\n",
+ "with open(f\"multiples_{category}_{curve}_{bits}_ctr_chunk{chunk_id}.pickle\",\"wb\") as h:\n",
+ " pickle.dump(multiples_mults, h)"
]
},
{
"cell_type": "markdown",
- "id": "6845ba69-74b0-4709-a64d-dd4860255ee2",
+ "id": "b4471a1d-fdc3-4be7-bd61-5ddd22180b41",
"metadata": {},
"source": [
- "### Save"
+ "### Load\n",
+ "Beware, the following load with try to load all chunks into memory, that will be very large."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "0ae7f726-2981-48af-8ae3-a9afcf2dc18f",
+ "id": "3d291832-b0c7-4c3a-9989-22079e4e0f53",
"metadata": {},
"outputs": [],
"source": [
- "with open(f\"multiples_{category}_{curve}_{bits}\",\"wb\") as h:\n",
- " pickle.dump(multiples_mults, h)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "b4471a1d-fdc3-4be7-bd61-5ddd22180b41",
- "metadata": {},
- "source": [
- "### Load"
+ "multiples_mults = {}\n",
+ "for fname in glob.glob(f\"multiples_{category}_{curve}_{bits}_chunk*.pickle\"):\n",
+ " with open(fname, \"rb\") as f:\n",
+ " multiples_loaded = pickle.load(f)\n",
+ " for mult, vals in multiples_loaded.items():\n",
+ " if mult not in multiples_mults:\n",
+ " multiples_mults[mult] = vals\n",
+ " else:\n",
+ " multiples_mults[mult].merge(vals)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "3d291832-b0c7-4c3a-9989-22079e4e0f53",
+ "id": "264385f2-0f58-4f18-bfc2-61f2c5b6e5c8",
"metadata": {},
"outputs": [],
- "source": [
- "with open(f\"multiples_{category}_{curve}_{bits}\", \"rb\") as f:\n",
- " multiples_mults = pickle.load(f)"
- ]
+ "source": []
}
],
"metadata": {
diff --git a/epare/visualize.ipynb b/epare/visualize.ipynb
index 8fa5bb1..4b74e30 100644
--- a/epare/visualize.ipynb
+++ b/epare/visualize.ipynb
@@ -9,6 +9,8 @@
"source": [
"import pickle\n",
"import itertools\n",
+ "import glob\n",
+ "import gc\n",
"\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
@@ -21,56 +23,33 @@
"from bs4 import BeautifulSoup\n",
"from tqdm.auto import tqdm, trange\n",
"\n",
- "from common import MultIdent, MultResults"
+ "from pyecsca.ec.params import get_params\n",
+ "from pyecsca.ec.mult import *\n",
+ "from pyecsca.misc.utils import TaskExecutor\n",
+ "\n",
+ "from common import *\n",
+ "\n",
+ "%matplotlib ipympl"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "2130254b-4b88-4928-9fa0-88fa58de9fc7",
+ "id": "e89e66dc-4a9b-4320-8612-a8fa9af04b69",
"metadata": {},
"outputs": [],
"source": [
- "# All dbl-and-add multipliers from https://github.com/J08nY/pyecsca/blob/master/pyecsca/ec/mult\n",
- "\n",
- "window_mults = [\n",
- " MultIdent(SlidingWindowMultiplier, width=4),\n",
- " MultIdent(SlidingWindowMultiplier, width=5),\n",
- " MultIdent(SlidingWindowMultiplier, width=6),\n",
- " MultIdent(FixedWindowLTRMultiplier, m=2**4),\n",
- " MultIdent(FixedWindowLTRMultiplier, m=2**5),\n",
- " MultIdent(FixedWindowLTRMultiplier, m=2**6),\n",
- " MultIdent(WindowBoothMultiplier, width=4),\n",
- " MultIdent(WindowBoothMultiplier, width=5),\n",
- " MultIdent(WindowBoothMultiplier, width=6)\n",
- "]\n",
- "naf_mults = [\n",
- " MultIdent(WindowNAFMultiplier, width=4),\n",
- " MultIdent(WindowNAFMultiplier, width=5),\n",
- " MultIdent(WindowNAFMultiplier, width=6),\n",
- " MultIdent(BinaryNAFMultiplier)\n",
- "]\n",
- "comb_mults = [\n",
- " MultIdent(CombMultiplier, width=4),\n",
- " MultIdent(CombMultiplier, width=5),\n",
- " MultIdent(CombMultiplier, width=6),\n",
- " MultIdent(BGMWMultiplier, width=4),\n",
- " MultIdent(BGMWMultiplier, width=5),\n",
- " MultIdent(BGMWMultiplier, width=6)\n",
- "]\n",
- "binary_mults = [\n",
- " MultIdent(LTRMultiplier),\n",
- " MultIdent(RTLMultiplier),\n",
- " MultIdent(CoronMultiplier)\n",
- "]\n",
- "other_mults = [\n",
- " MultIdent(FullPrecompMultiplier),\n",
- " MultIdent(SimpleLadderMultiplier)\n",
- "]\n",
- "\n",
- "with_precomputation = window_mults + naf_mults[:-1] + other_mults[:-1] + comb_mults\n",
- "\n",
- "all_mults = window_mults + naf_mults + binary_mults + other_mults + comb_mults"
+ "# Setup the ticks and colors deterministically.\n",
+ "mult_klasses = sorted(list(set(map(lambda mult: mult.klass, all_mults))), key=lambda klass: klass.__name__)\n",
+ "mult_kwarg_map = {klass: 0 for klass in mult_klasses}\n",
+ "mult_colors = matplotlib.cm.tab20(range(len(mult_klasses)))\n",
+ "mult_styles = ['-', '--', '-.', ':', (5, (10, 3)), (0, (5, 1)), (0, (3, 1, 1, 1, 1, 1)), (0, (3, 1, 1, 1)), (0, (1, 1)), (0, (3, 10, 1, 10))]\n",
+ "colors = {}\n",
+ "styles = {}\n",
+ "for mult in all_mults:\n",
+ " colors[mult] = mult_colors[mult_klasses.index(mult.klass)]\n",
+ " styles[mult] = mult_styles[mult_kwarg_map[mult.klass]]\n",
+ " mult_kwarg_map[mult.klass] += 1"
]
},
{
@@ -81,14 +60,12 @@
"outputs": [],
"source": [
"def divides_any(l: int, small_scalars: set[int]) -> bool:\n",
- " if l in small_scalars:\n",
- " return True\n",
" for s in small_scalars:\n",
" if s%l==0:\n",
" return True\n",
" return False\n",
"\n",
- "def process_small_scalars(scalar_results: MultResults, divisors: set[int]) -> dict[int, float]:\n",
+ "def process_small_scalars(scalar_results: MultResults, divisors: set[int]) -> ProbMap:\n",
" result = {}\n",
" for divisor in tqdm(divisors, leave=False):\n",
" count = 0\n",
@@ -96,29 +73,7 @@
" if divides_any(divisor, smult):\n",
" count += 1\n",
" result[divisor] = count / scalar_results.samples\n",
- " return result\n",
- "\n",
- "def merge_probs(*prob_maps: dict[int, float]) -> dict[int, float]:\n",
- " # Merge two or more maps of \"small-scalar\" -> \"probability\" together by averaging them.\n",
- " # This is correct if they were collected with the same amount of samples. If the\n",
- " # amount of samples differs a lot this will not update as much as it should, but will\n",
- " # update in the correct direction nonetheless.\n",
- " counter = Counter()\n",
- " nprobs = len(prob_maps)\n",
- " for prob_map in prob_maps:\n",
- " for k, v in prob_map.items():\n",
- " counter[k] += v\n",
- " return {k: v / nprobs for k, v in counter.items()}\n",
- "\n",
- "def mult_label(mult: MultIdent | ScalarMultiplier) -> str:\n",
- " if isinstance(mult, ScalarMultiplier):\n",
- " for attr in (\"width\", \"m\"):\n",
- " if not hasattr(mult, attr):\n",
- " continue\n",
- " return f\"{mult.__class__.__name__}_{getattr(mult, attr)}\"\n",
- " return mult.__class__.__name__\n",
- " elif isinstance(mult, MultIdent):\n",
- " return str(mult)"
+ " return ProbMap(result, scalar_results.samples)"
]
},
{
@@ -128,7 +83,7 @@
"metadata": {},
"outputs": [],
"source": [
- "def powers_of(k, max_power=10):\n",
+ "def powers_of(k, max_power=20):\n",
" return [k**i for i in range(1, max_power)]\n",
"\n",
"def prod_combine(one, other):\n",
@@ -137,21 +92,39 @@
"small_primes = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199]\n",
"medium_primes = [211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397]\n",
"large_primes = [401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]\n",
- "all_integers = list(range(1, 100))\n",
+ "all_integers = list(range(1, 400))\n",
+ "all_even = list(range(2, 400, 2))\n",
+ "all_odd = list(range(1, 400, 2))\n",
+ "all_primes = small_primes + medium_primes + large_primes\n",
"\n",
- "all_divisors = small_primes + medium_primes + large_primes #+ powers_of(2)"
+ "divisor_map = {\n",
+ " \"small_primes\": small_primes,\n",
+ " \"medium_primes\": medium_primes,\n",
+ " \"large_primes\": large_primes,\n",
+ " \"all_primes\": all_primes,\n",
+ " \"all_integers\": all_integers,\n",
+ " \"all_even\": all_even,\n",
+ " \"all_odd\": all_odd,\n",
+ " \"powers_of_2\": powers_of(2),\n",
+ " \"powers_of_2_large\": powers_of(2, 130),\n",
+ " \"powers_of_2_large_p1\": [i+1 for i in powers_of(2, 130)],\n",
+ " \"powers_of_3\": powers_of(3),\n",
+ "}\n",
+ "divisor_map[\"all\"] = list(sorted(set().union(*[v for v in divisor_map.values()])))"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "318ca5ac-66a6-4187-a01f-f0e2d27ba34e",
+ "id": "638f8634-1f6e-4844-a796-096611dfbac2",
"metadata": {},
"outputs": [],
"source": [
- "# Load\n",
- "with open(f\"multiples_{category}_{curve}_{bits}\", \"rb\") as f:\n",
- " multiples_mults = pickle.load(f)"
+ "category = \"secg\"\n",
+ "curve = \"secp256r1\"\n",
+ "params = get_params(category, curve, \"projective\")\n",
+ "bits = params.order.bit_length()\n",
+ "num_workers = 10"
]
},
{
@@ -162,19 +135,36 @@
"outputs": [],
"source": [
"selected_mults = all_mults\n",
- "selected_divisors = all_divisors"
+ "divisor_name = \"powers_of_2_large_p1\"\n",
+ "selected_divisors = divisor_map[divisor_name]"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "6b42d25f-6ce1-477a-bc5f-d7c2a8af87a3",
+ "id": "4d2b0f1a-b50a-4548-b63e-c6507e95503d",
"metadata": {},
"outputs": [],
"source": [
"distributions_mults = {}\n",
- "for mult, results in tqdm(multiples_mults.items()):\n",
- " distributions_mults[mult] = process_small_scalars(results, selected_divisors)"
+ "files = tqdm(glob.glob(f\"multiples_{category}_{curve}_{bits}_chunk*.pickle\"))\n",
+ "with TaskExecutor(max_workers=num_workers) as pool:\n",
+ " for fname in files:\n",
+ " files.set_postfix_str(fname)\n",
+ " with open(fname, \"rb\") as f:\n",
+ " multiples_loaded = pickle.load(f)\n",
+ " for mult, results in multiples_loaded.items():\n",
+ " pool.submit_task(mult,\n",
+ " process_small_scalars,\n",
+ " results, selected_divisors)\n",
+ " for mult, future in tqdm(pool.as_completed(), leave=False, total=len(pool.tasks)):\n",
+ " prob_map = future.result()\n",
+ " if mult in distributions_mults:\n",
+ " distributions_mults[mult].merge(prob_map)\n",
+ " else:\n",
+ " distributions_mults[mult] = prob_map\n",
+ " del multiples_loaded\n",
+ " gc.collect()"
]
},
{
@@ -187,24 +177,86 @@
"plot_mults = selected_mults\n",
"plot_divisors = selected_divisors\n",
"\n",
- "colors = {mult:matplotlib.cm.tab20(range(len(plot_mults)))[i] for i,mult in enumerate(plot_mults)}\n",
- "\n",
- "fig = plt.subplots(figsize=(36, 12))\n",
+ "fig = plt.figure(figsize=(58, 12))\n",
+ "ax = plt.subplot(111)\n",
"\n",
"L = len(plot_divisors)\n",
+ "N = len(plot_mults)\n",
"plot_divisors = sorted(plot_divisors)\n",
- "for mult in plot_mults:\n",
+ "vals = np.zeros((N, L))\n",
+ "for i, mult in enumerate(plot_mults):\n",
" y_values = [distributions_mults[mult][l] for l in plot_divisors]\n",
- " plt.plot(list(range(L)), y_values, color=colors[mult], label=str(mult))\n",
- "plt.plot(list(range(L)), var / np.max(var), label=\"cross-mult variance (normalized)\", ls=\"--\", lw=2, color=\"black\")\n",
- "plt.xlabel(\"divisors\") \n",
- "plt.ylabel(\"error probability\")\n",
- "plt.xticks(list(range(L)), plot_divisors, rotation=90)\n",
+ " vals[i,] = y_values\n",
+ " ax.plot(list(range(L)), y_values, color=colors[mult], linestyle=styles[mult], label=str(mult))\n",
+ " ax.set_title(divisor_name + \"\\nSamples: \" + str(distributions_mults[mult].samples))\n",
"\n",
- "plt.grid()\n",
- "plt.legend()\n",
- "plt.show() \n",
- "fig[0].savefig(f\"graphs/re.png\",dpi=300)"
+ "var = np.var(vals, axis=0)\n",
+ "#ax.plot(list(range(L)), var / np.max(var), label=\"cross-mult variance (normalized)\", ls=\"--\", lw=2, color=\"black\")\n",
+ "\n",
+ "ax.set_xlabel(\"divisors\") \n",
+ "ax.set_ylabel(\"error probability\")\n",
+ "ax.set_xticks(list(range(L)), plot_divisors, rotation=90)\n",
+ "\n",
+ "ax.grid()\n",
+ "plt.tight_layout()\n",
+ "box = ax.get_position()\n",
+ "ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])\n",
+ "\n",
+ "# Put a legend to the right of the current axis\n",
+ "ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n",
+ "plt.show()\n",
+ "fig.savefig(f\"graphs/{divisor_name}.png\",dpi=300);"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8454cb7a-5308-43c6-9cd0-5de7946ec72a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# general_distributions = get_general_distributions(selected_divisors, bits, samples)\n",
+ "# general_n_distributions = get_general_n_distributions(selected_divisors, bits, 256, samples)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "90a654f0-accc-4bb9-be2d-5f03e2d1e543",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n"
]
},
{
@@ -245,17 +297,6 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "8454cb7a-5308-43c6-9cd0-5de7946ec72a",
- "metadata": {},
- "outputs": [],
- "source": [
- "# general_distributions = get_general_distributions(selected_divisors, bits, samples)\n",
- "# general_n_distributions = get_general_n_distributions(selected_divisors, bits, 256, samples)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
"id": "8eae8df8-0bf8-4a9d-a55e-deea6a9d6b07",
"metadata": {},
"outputs": [],
@@ -662,7 +703,15 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "37a64963-e63a-4c74-8414-6d29482e7151",
+ "id": "24adaa81-df58-47a9-bdea-0b373be4368a",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ae10629c-72f7-4a66-8495-8302ffa61887",
"metadata": {},
"outputs": [],
"source": []