Diffstat (limited to 'analysis/scalarmults/simulate.ipynb')
| -rw-r--r-- | analysis/scalarmults/simulate.ipynb | 268 |
1 file changed, 101 insertions, 167 deletions
diff --git a/analysis/scalarmults/simulate.ipynb b/analysis/scalarmults/simulate.ipynb
index c1f123a..07f0e8f 100644
--- a/analysis/scalarmults/simulate.ipynb
+++ b/analysis/scalarmults/simulate.ipynb
@@ -25,12 +25,11 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 15,
    "id": "b4386513-cc14-434b-a748-2863f8657452",
    "metadata": {},
    "outputs": [],
    "source": [
-    "import itertools\n",
     "import gc\n",
     "import glob\n",
     "import hashlib\n",
@@ -52,7 +51,8 @@
     "\n",
     "from collections import Counter\n",
     "from pathlib import Path\n",
-    "from random import randint, randbytes\n",
+    "from random import randint, randbytes, randrange\n",
+    "from functools import partial\n",
     "from typing import Type, Any\n",
     "\n",
     "from tqdm.auto import tqdm, trange\n",
@@ -64,7 +64,7 @@
     "from pyecsca.sca.re.epa import graph_to_check_inputs, evaluate_checks\n",
     "from pyecsca.misc.utils import TaskExecutor\n",
     "\n",
-    "from common import *"
+    "from epare import *"
    ]
   },
   {
@@ -80,7 +80,7 @@ },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 16,
    "id": "3463a7bd-34d8-458b-8ceb-dddf99de21dc",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -97,10 +97,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 17,
    "id": "170c11fc-86cf-4eb1-bf4e-b2e44b2d7ac5",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Scalar multipliers considered: 65\n",
+      "Scalar multipliers (with a combination of up to two countermeasures) considered: 5265\n",
+      "Error models considered: 32\n",
+      "Total configurations considered: 168480\n"
+     ]
+    }
+   ],
    "source": [
     "nmults = len(all_mults)\n",
     "nmults_ctr = len(all_mults_with_ctr)\n",
@@ -108,7 +119,7 @@
     "ncfgs = nmults_ctr * nerror_models\n",
     "\n",
     "print(f\"Scalar multipliers considered: {nmults}\")\n",
-    "print(f\"Scalar multipliers (with a single countermeasure) considered: {nmults_ctr}\")\n",
+    "print(f\"Scalar multipliers (with a combination of up to two countermeasures) considered: {nmults_ctr}\")\n",
     "print(f\"Error models considered: {nerror_models}\")\n",
     "print(f\"Total configurations considered: {ncfgs}\")"
    ]
@@ -126,7 +137,7 @@ },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 18,
    "id": "4d5c7f10-618f-4612-b594-81d1607b0d1d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -139,170 +150,19 @@
     "use_init = True\n",
     "use_multiply = True\n",
     "\n",
-    "num_workers = 30\n",
+    "num_workers = 20\n",
     "samples = 1000\n",
     "\n",
     "selected_mults = all_mults"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "07bc266d-35eb-4f6d-bdba-e9f6f66827f1",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def simulate_multiples(mult: MultIdent,\n",
-    "                       params: DomainParameters,\n",
-    "                       bits: int,\n",
-    "                       samples: int = 100,\n",
-    "                       seed: bytes | None = None) -> MultResults:\n",
-    "    \"\"\"\n",
-    "    Takes a MultIdent, which specifies a scalar multiplier (with an optional countermeasure)\n",
-    "    and simulates `samples` scalar multiplications, while tracking which multiples of the\n",
-    "    symbolic input point get computed.\n",
-    "    \"\"\"\n",
-    "    results = []\n",
-    "    if seed is not None:\n",
-    "        random.seed(seed)\n",
-    "\n",
-    "    # If no countermeasure is used, we have fully random scalars.\n",
-    "    # Otherwise, fix one per chunk.\n",
-    "    if mult.countermeasure is None:\n",
-    "        scalars = [random.randint(1, 2**bits) for _ in range(samples)]\n",
-    "    else:\n",
-    "        one = random.randint(1, 2**bits)\n",
-    "        scalars = [one for _ in range(samples)]\n",
-    "\n",
-    "    for scalar in scalars:\n",
-    "        results.append(multiple_graph(scalar, params, mult.klass, mult.partial))\n",
-    "    return MultResults(results, samples)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6f3e9ffa-19e8-46b2-a6ad-1d318d5c8e17",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def simulate_multiples_direct(mult: MultIdent,\n",
-    "                              params: DomainParameters,\n",
-    "                              bits: int,\n",
-    "                              fname: str,\n",
-    "                              samples: int = 100,\n",
-    "                              seed: bytes | None = None) -> str:\n",
-    "    \"\"\"\n",
-    "    Like the `simulate_multiples` function above, but stores the pickled output directly\n",
-    "    into a file named `fname`.\n",
-    "    \"\"\"\n",
-    "    results = []\n",
-    "    if seed is not None:\n",
-    "        random.seed(seed)\n",
-    "\n",
-    "    # If no countermeasure is used, we have fully random scalars.\n",
-    "    # Otherwise, fix one per chunk.\n",
-    "    if mult.countermeasure is None:\n",
-    "        scalars = [random.randint(1, 2**bits) for _ in range(samples)]\n",
-    "    else:\n",
-    "        one = random.randint(1, 2**bits)\n",
-    "        scalars = [one for _ in range(samples)]\n",
-    "\n",
-    "    for scalar in scalars:\n",
-    "        results.append(multiple_graph(scalar, params, mult.klass, mult.partial))\n",
-    "    result = MultResults(results, samples)\n",
-    "    with open(fname, \"wb\") as f:\n",
-    "        pickle.dump((mult, result), f)\n",
-    "    return fname"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "64799c16-8113-4eff-81de-6a3e547eb5c5",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def evaluate_multiples(mult: MultIdent,\n",
-    "                       res: MultResults,\n",
-    "                       divisors: set[int],\n",
-    "                       use_init: bool = True,\n",
-    "                       use_multiply: bool = True):\n",
-    "    \"\"\"\n",
-    "    Takes MultIdent and MultResults and a set of divisors (base point orders `q`) and\n",
-    "    evaluates them using the error model from the MultIdent. Note that the MultIdent\n",
-    "    must have an error model in this case. Returns the ProbMap.\n",
-    "    \"\"\"\n",
-    "    errors = {divisor: 0 for divisor in divisors}\n",
-    "    samples = len(res)\n",
-    "    divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()\n",
-    "    for precomp_ctx, full_ctx, out in res:\n",
-    "        check_inputs = graph_to_check_inputs(precomp_ctx, full_ctx, out,\n",
-    "                                             check_condition=mult.error_model.check_condition,\n",
-    "                                             precomp_to_affine=mult.error_model.precomp_to_affine,\n",
-    "                                             use_init=use_init,\n",
-    "                                             use_multiply=use_multiply)\n",
-    "        for q in divisors:\n",
-    "            error = evaluate_checks(check_funcs={\"add\": mult.error_model.check_add(q), \"affine\": mult.error_model.check_affine(q)},\n",
-    "                                    check_inputs=check_inputs)\n",
-    "            errors[q] += error\n",
-    "    # Make probmaps smaller. Do not store zero probabilities.\n",
-    "    probs = {}\n",
-    "    for q, error in errors.items():\n",
-    "        if error != 0:\n",
-    "            probs[q] = error / samples\n",
-    "    return ProbMap(probs, divisors_hash, samples)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ac630a08-4120-41cf-b3bb-1827ef469542",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def evaluate_multiples_direct(mult: MultIdent,\n",
-    "                              fname: str,\n",
-    "                              offset: int,\n",
-    "                              divisors: set[int],\n",
-    "                              use_init: bool = True,\n",
-    "                              use_multiply: bool = True):\n",
-    "    \"\"\"\n",
-    "    Like `evaluate_multiples`, but instead reads the MultResults from a file named `fname`\n",
-    "    at an `offset`. Still returns the ProbMap, which is significantly smaller and easier\n",
-    "    to pickle than the MultResults.\n",
-    "    \"\"\"\n",
-    "    with open(fname, \"rb\") as f:\n",
-    "        f.seek(offset)\n",
-    "        _, res = pickle.load(f)\n",
-    "    errors = {divisor: 0 for divisor in divisors}\n",
-    "    samples = len(res)\n",
-    "    divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()\n",
-    "    for precomp_ctx, full_ctx, out in res:\n",
-    "        check_inputs = graph_to_check_inputs(precomp_ctx, full_ctx, out,\n",
-    "                                             check_condition=mult.error_model.check_condition,\n",
-    "                                             precomp_to_affine=mult.error_model.precomp_to_affine,\n",
-    "                                             use_init=use_init,\n",
-    "                                             use_multiply=use_multiply)\n",
-    "        for q in divisors:\n",
-    "            error = evaluate_checks(check_funcs={\"add\": mult.error_model.check_add(q), \"affine\": mult.error_model.check_affine(q)},\n",
-    "                                    check_inputs=check_inputs)\n",
-    "            errors[q] += error\n",
-    "    # Make probmaps smaller. Do not store zero probabilities.\n",
-    "    probs = {}\n",
-    "    for q, error in errors.items():\n",
-    "        if error != 0:\n",
-    "            probs[q] = error / samples\n",
-    "    return ProbMap(probs, divisors_hash, samples)"
-   ]
-  },
-  {
    "cell_type": "markdown",
    "id": "3aaf712e-5b97-4390-8dd4-e1db1dfe36a2",
    "metadata": {},
    "source": [
     "## Run\n",
-    "Run this cell as many times as you want. It will simulate `samples` scalar multiplications for each `MultIdent` (a scalar multiplier implementation with an optional countermeasure) and store them into the chunk."
+    "Run this cell as many times as you want. It will simulate `samples` scalar multiplications for each `Config` (a scalar multiplier implementation with an optional countermeasure) and store them into the chunk."
    ]
   },
   {
@@ -310,12 +170,35 @@
    "execution_count": null,
    "id": "84359084-4116-436c-92cd-d43fdfeca842",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "f7b8ac781be9408db32f219b4b2290a0",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Computing multiple graphs.:   0%|          | 0/5265 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/xjancar/pyecsca/virt/lib/python3.12/site-packages/loky/process_executor.py:782: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.\n",
+      "  warnings.warn(\n"
+     ]
+    }
+   ],
    "source": [
     "chunk_id = randbytes(4).hex()\n",
     "with TaskExecutor(max_workers=num_workers, initializer=silence) as pool, tempfile.TemporaryDirectory() as tmp_dirname:\n",
     "    tmp_path = Path(tmp_dirname)\n",
-    "    for i, mult in enumerate(all_mults_with_ctr):\n",
+    "    for i, mult in enumerate(all_configs):\n",
     "        pool.submit_task(mult,\n",
    "                         simulate_multiples_direct,\n",
     "                         mult, params, bits, tmp_path / f\"{i}.pickle\", samples, seed=chunk_id)\n",
@@ -324,7 +207,8 @@
     "        #print(f\"Got {mult}.\")\n",
     "        if error := future.exception():\n",
     "            print(\"Error!\", error)\n",
-    "            continue\n",
+    "            break\n",
+    "            #continue\n",
     "        fpath = future.result()\n",
     "        with fpath.open(\"rb\") as f:\n",
     "            h.write(f.read())\n",
@@ -351,7 +235,57 @@
    "execution_count": null,
    "id": "fbab8333-b8f1-4890-b38a-7bb34f5ffb02",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "f4bc80bd47b345e0a74ad1a94121bfb1",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Processing chunks:   0%|          | 0/1 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Processing chunk 316daffc, no probmaps found.\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "38046ebec4d34694a973ab1959f14905",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Loading chunk 316daffc.:   0%|          | 0/5265 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "76e3fb85f95e41069c2b7fbe86b27650",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Processing 316daffc.:   0%|          | 0/168480 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
     "with TaskExecutor(max_workers=num_workers, initializer=silence) as pool:\n",
     "    for in_fname in tqdm(glob.glob(f\"multiples_{bits}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
@@ -364,7 +298,7 @@
     "        out_file = Path(out_fname)\n",
     "\n",
     "        cfgs_todo = set()\n",
-    "        for mult in all_mults_with_ctr:\n",
+    "        for mult in all_configs:\n",
     "            for error_model in all_error_models:\n",
     "                cfgs_todo.add(mult.with_error_model(error_model))\n",
     "\n",
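
Note: the cells removed by this commit defined `simulate_multiples`, `simulate_multiples_direct`, `evaluate_multiples` and `evaluate_multiples_direct`, which the notebook now imports from the `epare` module (`from common import *` becomes `from epare import *`). The easiest part of the removed code to miss is its scalar-sampling policy: without a countermeasure, every simulated execution draws a fresh random scalar; with a countermeasure, one scalar is fixed for the whole chunk, since the countermeasure itself is what varies the computation between executions. A minimal sketch of just that policy, with a hypothetical `Mult` class standing in for `MultIdent`:

```python
import random
from dataclasses import dataclass


@dataclass
class Mult:
    """Hypothetical stand-in for MultIdent; only the countermeasure field matters here."""
    countermeasure: str | None = None


def sample_scalars(mult: Mult, bits: int, samples: int,
                   seed: str | bytes | None = None) -> list[int]:
    if seed is not None:
        random.seed(seed)  # seeding per chunk keeps the chunk reproducible
    if mult.countermeasure is None:
        # No countermeasure: each execution gets an independent random scalar.
        return [random.randint(1, 2**bits) for _ in range(samples)]
    # Countermeasure present: fix a single scalar for the whole chunk;
    # the countermeasure randomizes each execution instead.
    one = random.randint(1, 2**bits)
    return [one] * samples
```

In the run cell, `chunk_id = randbytes(4).hex()` is passed as the `seed` of every task, so all tasks in a chunk sample consistently, while re-running the cell yields a new, differently-seeded chunk.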

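The `_direct` variants exist so that large `MultResults` objects never travel back through the executor: each worker pickles `(mult, result)` into its own temporary file, the run cell concatenates those files into a single `multiples_{bits}_chunk*.pickle`, and `evaluate_multiples_direct` later seeks to a record's offset and unpickles it in place, returning only the much smaller `ProbMap`. A self-contained sketch of that chunk-file layout (the function names here are illustrative, not from the notebook):

```python
import pickle
from pathlib import Path


def write_chunk(records: list[object], chunk_path: Path) -> list[int]:
    """Concatenate one pickle per record into a single file, recording start offsets."""
    offsets = []
    with chunk_path.open("wb") as h:
        for record in records:
            offsets.append(h.tell())       # where this record starts
            h.write(pickle.dumps(record))
    return offsets


def read_record(chunk_path: Path, offset: int) -> object:
    """Recover a single record by seeking to its start offset."""
    with chunk_path.open("rb") as f:
        f.seek(offset)
        return pickle.load(f)  # reads exactly one pickle stream


offsets = write_chunk([("mult-0", list(range(3))), ("mult-1", list(range(5)))],
                      Path("chunk.pickle"))
assert read_record(Path("chunk.pickle"), offsets[1]) == ("mult-1", [0, 1, 2, 3, 4])
```

This works because a pickle stream is self-delimiting: `pickle.load` stops after exactly one object, so back-to-back pickles in one file can be addressed by their start offsets alone, with no framing or index inside the file itself.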