commit    47a09e109462a424979230f0cac819124a166682
author    J08nY  2025-08-01 16:20:49 +0200
committer J08nY  2025-08-01 16:20:49 +0200
tree      b0c801a4084bfcca7cf56a2f2e3aa9ff61bbfc08
parent    7754e59a7b1affec5db915bb5af8ab45d28f2953

Allow more robust processing into probmaps.

 analysis/scalarmults/common.py      |  32
 analysis/scalarmults/simulate.ipynb | 138
 2 files changed, 126 insertions, 44 deletions
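
The headline change: probmap computation is now restartable. Finished (config, probmap) records are appended to a per-chunk pickle file, and on a re-run the file is replayed to discover which configurations are already done, truncating a trailing half-written record if the previous run died mid-dump. A minimal standalone sketch of that replay-and-truncate pattern (the file name and keys here are hypothetical, not the notebook's actual code):

    import pickle
    from pathlib import Path

    def replay_done(path: Path) -> set:
        # Replay an append-only pickle log: collect keys of complete records,
        # truncate a trailing partial record so later appends stay readable.
        done = set()
        if not path.exists():
            return done
        with path.open("r+b") as f:
            last_end = 0
            while True:
                try:
                    key, _value = pickle.load(f)
                    done.add(key)
                    last_end = f.tell()
                except EOFError:
                    break  # clean end of the log
                except pickle.UnpicklingError:
                    f.truncate(last_end)  # drop the partial trailing record
                    break
        return done

    log = Path("results.pickle")
    done = replay_done(log)
    with log.open("ab") as h:
        for key in ("cfg-a", "cfg-b", "cfg-c"):  # hypothetical work items
            if key in done:
                continue
            pickle.dump((key, len(key)), h)  # len() stands in for real work
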
diff --git a/analysis/scalarmults/common.py b/analysis/scalarmults/common.py
index eda6f5f..c2369f6 100644
--- a/analysis/scalarmults/common.py
+++ b/analysis/scalarmults/common.py
@@ -51,6 +51,17 @@ checks_affine = {
}
+def powers_of(k, max_power=20):
+    return [k**i for i in range(1, max_power)]
+
+def prod_combine(one, other):
+    return [a * b for a, b in itertools.product(one, other)]
+
+def powerset(iterable):
+    s = list(iterable)
+    return map(set, itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s)+1)))
+
+
@dataclass(frozen=True)
@total_ordering
class ErrorModel:
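
These helpers move to the top of common.py (the later hunk removes the old copies further down) so that the new all_error_models list can be built at import time. Their semantics in brief — note that powers_of starts at k**1 and excludes max_power, and that powerset includes the empty set:

    import itertools

    def powers_of(k, max_power=20):
        return [k**i for i in range(1, max_power)]

    def prod_combine(one, other):
        return [a * b for a, b in itertools.product(one, other)]

    def powerset(iterable):
        s = list(iterable)
        return map(set, itertools.chain.from_iterable(
            itertools.combinations(s, r) for r in range(len(s) + 1)))

    assert powers_of(3, 4) == [3, 9, 27]  # k**1 .. k**(max_power-1)
    assert prod_combine([2, 3], [5, 7]) == [10, 14, 15, 21]
    assert list(powerset("ab")) == [set(), {"a"}, {"b"}, {"a", "b"}]
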
@@ -96,6 +107,17 @@ class ErrorModel:
        precomp = "+pre" if self.precomp_to_affine else ""
        return f"({','.join(cs)}+{self.check_condition}{precomp})"

+    def __hash__(self):
+        return hash((tuple(sorted(self.checks)), self.check_condition, self.precomp_to_affine))
+
+
+all_error_models = []
+for checks in powerset(checks_add):
+    for precomp_to_affine in (True, False):
+        for check_condition in ("all", "necessary"):
+            error_model = ErrorModel(checks, check_condition=check_condition, precomp_to_affine=precomp_to_affine)
+            all_error_models.append(error_model)
+
@dataclass(frozen=True)
@total_ordering
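
The explicit __hash__ is what lets configurations serve as set members and task keys later on (cfgs_todo, pool.submit_task). A frozen dataclass would normally generate a __hash__, but ErrorModel.checks is a plain set: unhashable, with no guaranteed iteration order. Sorting it into a tuple first gives equal models equal hashes regardless of construction order. A self-contained sketch of the same pattern (Model is a hypothetical stand-in, not the real ErrorModel):

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class Model:
        checks: set
        check_condition: str

        def __hash__(self):
            # A plain set is unhashable and unordered; sort it into a
            # tuple so equal models hash equally.
            return hash((tuple(sorted(self.checks)), self.check_condition))

    a = Model({"y", "x"}, "all")
    b = Model({"x", "y"}, "all")
    assert a == b and hash(a) == hash(b)
    assert len({a, b}) == 1  # usable in sets and as dict keys
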
@@ -141,7 +163,7 @@ class MultIdent:
    def with_error_model(self, error_model: ErrorModel | None):
        if not (isinstance(error_model, ErrorModel) or error_model is None):
            raise ValueError("Unknown error model.")
-        return MultIdent(self.klass, *self.args, **self.kwargs, error_model=error_model)
+        return MultIdent(self.klass, *self.args, **self.kwargs, countermeasure=self.countermeasure, error_model=error_model)

    def __str__(self):
        name = self.klass.__name__.replace("Multiplier", "")
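
The one-line fix above is the subtle part of this hunk: the copy-style constructor call rebuilt MultIdent from klass, args and kwargs but forgot countermeasure, so attaching an error model silently reset any countermeasure chosen earlier — exactly what the new all_mults_with_ctr × all_error_models sweep would have tripped over. A minimal reproduction of the bug pattern (Ident is a hypothetical simplification):

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class Ident:
        name: str
        countermeasure: str | None = None
        error_model: str | None = None

        def with_error_model_buggy(self, em):
            # Rebuilds from selected fields only: countermeasure is lost.
            return Ident(self.name, error_model=em)

        def with_error_model_fixed(self, em):
            return Ident(self.name, countermeasure=self.countermeasure,
                         error_model=em)

    m = Ident("ltr", countermeasure="gsr")
    assert m.with_error_model_buggy("checks").countermeasure is None   # the bug
    assert m.with_error_model_fixed("checks").countermeasure == "gsr"  # the fix
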
@@ -322,15 +344,7 @@ all_mults = window_mults + naf_mults + binary_mults + other_mults + comb_mults
all_mults_with_ctr = [mult.with_countermeasure(ctr) for mult in all_mults for ctr in (None, "gsr", "additive", "multiplicative", "euclidean", "bt")]
-def powers_of(k, max_power=20):
-    return [k**i for i in range(1, max_power)]
-
-def prod_combine(one, other):
-    return [a * b for a, b in itertools.product(one, other)]
-
-def powerset(iterable):
-    s = list(iterable)
-    return map(set, itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s)+1)))
small_primes = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199]
diff --git a/analysis/scalarmults/simulate.ipynb b/analysis/scalarmults/simulate.ipynb
index 47c907d..4c06b10 100644
--- a/analysis/scalarmults/simulate.ipynb
+++ b/analysis/scalarmults/simulate.ipynb
@@ -79,21 +79,19 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "a660e3ac-401b-47a0-92de-55afe63c420a",
+ "id": "170c11fc-86cf-4eb1-bf4e-b2e44b2d7ac5",
"metadata": {},
"outputs": [],
"source": [
- "print(len(all_mults))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "a95b27fc-96a9-41b5-9972-dc8386ed386d",
- "metadata": {},
- "outputs": [],
- "source": [
- "print(len(all_mults_with_ctr))"
+ "nmults = len(all_mults)\n",
+ "nmults_ctr = len(all_mults_with_ctr)\n",
+ "nerror_models = len(all_error_models)\n",
+ "ncfgs = nmults_ctr * nerror_models\n",
+ "\n",
+ "print(f\"Scalar multipliers considered: {nmults}\")\n",
+ "print(f\"Scalar multipliers (with a single countermeasure) considered: {nmults_ctr}\")\n",
+ "print(f\"Error models considered: {nerror_models}\")\n",
+ "print(f\"Total configurations considered: {ncfgs}\")"
]
},
{
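
For scale: all_mults_with_ctr pairs every base multiplier with six countermeasure options (None, gsr, additive, multiplicative, euclidean, bt), so nmults_ctr = 6 · nmults; and all_error_models holds one entry per subset of checks_add times two check_condition values times two precomp_to_affine flags, i.e. nerror_models = 2^|checks_add| · 4. Purely as a hypothetical illustration, nmults = 50 with |checks_add| = 3 would give ncfgs = 300 · 32 = 9600 configurations.
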
@@ -182,6 +180,36 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ac630a08-4120-41cf-b3bb-1827ef469542",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def evaluate_multiples_direct(mult: MultIdent, fname: str, offset: int, divisors: set[int]):\n",
+ " with open(fname, \"rb\") as f:\n",
+ " f.seek(offset)\n",
+ " _, res = pickle.load(f)\n",
+ " errors = {divisor: 0 for divisor in divisors}\n",
+ " samples = len(res)\n",
+ " divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()\n",
+ " for ctx, out in res:\n",
+ " check_inputs = graph_to_check_inputs(ctx, out,\n",
+ " check_condition=mult.error_model.check_condition,\n",
+ " precomp_to_affine=mult.error_model.precomp_to_affine)\n",
+ " for q in divisors:\n",
+ " error = evaluate_checks(check_funcs={\"add\": mult.error_model.check_add(q), \"affine\": mult.error_model.check_affine(q)},\n",
+ " check_inputs=check_inputs)\n",
+ " errors[q] += error\n",
+ " # Make probmaps smaller. Do not store zero probabilities.\n",
+ " probs = {}\n",
+ " for q, error in errors.items():\n",
+ " if error != 0:\n",
+ " probs[q] = error / samples\n",
+ " return ProbMap(probs, divisors_hash, samples)"
+ ]
+ },
+ {
"cell_type": "markdown",
"id": "3aaf712e-5b97-4390-8dd4-e1db1dfe36a2",
"metadata": {},
@@ -201,12 +229,10 @@
"source": [
"chunk_id = randbytes(4).hex()\n",
"with TaskExecutor(max_workers=num_workers, initializer=silence) as pool:\n",
- " for mult in selected_mults:\n",
- " for countermeasure in (None, \"gsr\", \"additive\", \"multiplicative\", \"euclidean\", \"bt\"):\n",
- " full = mult.with_countermeasure(countermeasure)\n",
- " pool.submit_task(full,\n",
- " simulate_multiples,\n",
- " full, params, bits, samples, seed=chunk_id, use_init=use_init, use_multiply=use_multiply)\n",
+ " for mult in all_mults_with_ctr:\n",
+ " pool.submit_task(mult,\n",
+ " simulate_multiples,\n",
+ " mult, params, bits, samples, seed=chunk_id, use_init=use_init, use_multiply=use_multiply)\n",
" with open(f\"multiples_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk{chunk_id}.pickle\",\"wb\") as h:\n",
" for mult, future in tqdm(pool.as_completed(), desc=\"Computing multiple graphs.\", total=len(pool.tasks)):\n",
" print(f\"Got {mult}.\")\n",
@@ -234,38 +260,80 @@
"outputs": [],
"source": [
"with TaskExecutor(max_workers=num_workers, initializer=silence) as pool:\n",
- " for in_fname in glob.glob(f\"multiples_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk*.pickle\"):\n",
+ " for in_fname in tqdm(glob.glob(f\"multiples_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
+ " \n",
" match = re.match(\"multiples_(?P<bits>[0-9]+)_(?P<init>(?:no)?init)_(?P<mult>(?:no)?mult)_chunk(?P<id>[0-9a-f]+).pickle\", in_fname)\n",
" bits = match.group(\"bits\")\n",
" use_init = match.group(\"init\")\n",
" use_multiply = match.group(\"mult\")\n",
" chunk_id = match.group(\"id\")\n",
" out_fname = f\"probs_{bits}_{use_init}_{use_multiply}_chunk{chunk_id}.pickle\"\n",
- " with open(in_fname, \"rb\") as f, open(out_fname, \"wb\") as h:\n",
- " bar = tqdm(total=len(all_mults_with_ctr), desc=f\"Loading chunk {chunk_id}.\")\n",
+ "\n",
+ "\n",
+ " in_file = Path(in_fname)\n",
+ " out_file = Path(out_fname)\n",
+ "\n",
+ " cfgs_todo = set()\n",
+ " for mult in all_mults_with_ctr:\n",
+ " for error_model in all_error_models:\n",
+ " cfgs_todo.add(mult.with_error_model(error_model))\n",
+ "\n",
+ " if out_file.exists():\n",
+ " print(f\"Processing chunk {chunk_id}, some(or all) probmaps found.\")\n",
+ " with out_file.open(\"r+b\") as f:\n",
+ " while True:\n",
+ " try:\n",
+ " full, _ = pickle.load(f)\n",
+ " cfgs_todo.remove(full)\n",
+ " last_end = f.tell()\n",
+ " except EOFError:\n",
+ " break\n",
+ " except pickle.UnpicklingError:\n",
+ " f.truncate(last_end)\n",
+ " if not cfgs_todo:\n",
+ " print(f\"Chunk complete. Continuing...\")\n",
+ " else:\n",
+ " print(f\"Chunk missing {len(cfgs_todo)} probmaps, computing...\")\n",
+ " else:\n",
+ " print(f\"Processing chunk {chunk_id}, no probmaps found.\")\n",
+ " \n",
+ " with in_file.open(\"rb\") as f, out_file.open(\"ab\") as h:\n",
+ " loading_bar = tqdm(total=nmults_ctr, desc=f\"Loading chunk {chunk_id}.\", smoothing=0)\n",
+ " processing_bar = tqdm(total=len(cfgs_todo), desc=f\"Processing {chunk_id}.\", smoothing=0)\n",
" while True:\n",
" try:\n",
+ " start = f.tell()\n",
" mult, vals = pickle.load(f)\n",
- " bar.update(1)\n",
- " for checks in powerset(checks_add):\n",
- " for precomp_to_affine in (True, False):\n",
- " for check_condition in (\"all\", \"necessary\"):\n",
- " error_model = ErrorModel(checks, check_condition=check_condition, precomp_to_affine=precomp_to_affine)\n",
- " full = mult.with_error_model(error_model)\n",
- " pool.submit_task(full,\n",
- " evaluate_multiples,\n",
- " full, vals, divisor_map[\"all\"])\n",
+ " loading_bar.update(1)\n",
+ " for error_model in all_error_models:\n",
+ " full = mult.with_error_model(error_model)\n",
+ " if full in cfgs_todo:\n",
+ " # Pass the file name and offset to speed up computation start.\n",
+ " pool.submit_task(full,\n",
+ " evaluate_multiples_direct,\n",
+ " full, in_fname, start, divisor_map[\"all\"])\n",
" gc.collect()\n",
+ " if len(pool.tasks) > 1000:\n",
+ " for full, future in pool.as_completed():\n",
+ " processing_bar.update(1)\n",
+ " if error := future.exception():\n",
+ " print(\"Error!\", full, error)\n",
+ " continue\n",
+ " res = future.result()\n",
+ " pickle.dump((full, res), h)\n",
" except EOFError:\n",
" break\n",
- " for full, future in tqdm(pool.as_completed(), desc=\"Computing errors.\", total=len(pool.tasks)):\n",
- " print(f\"Got {full}.\")\n",
+ " except pickle.UnpicklingError:\n",
+ " print(\"Bad unpickling, the multiples file is likely truncated.\")\n",
+ " break\n",
+ " for full, future in pool.as_completed():\n",
+ " processing_bar.update(1)\n",
" if error := future.exception():\n",
- " print(\"Error!\", error)\n",
+ " print(\"Error!\", full, error)\n",
" continue\n",
" res = future.result()\n",
- " pickle.dump((full, res), f)\n",
- " gc.collect()"
+ " pickle.dump((full, res), h)\n",
+ " print(\"Chunk completed.\")\n"
]
},
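
Two details of the rewritten processing loop are easy to miss. First, start = f.tell() is captured before each pickle.load, so a worker handed (in_fname, start) can re-read exactly its own record via evaluate_multiples_direct; the parent never pushes the large unpickled graphs through the task queue. Second, whenever more than 1000 tasks are pending, the pool is drained inline, bounding memory for chunks with many configurations. A small sketch of the offset trick (file and keys are made up):

    import os
    import pickle
    from tempfile import NamedTemporaryFile

    # Producer: append records, remembering where each one starts.
    offsets = {}
    with NamedTemporaryFile(suffix=".pickle", delete=False) as h:
        path = h.name
        for key, value in [("a", list(range(5))), ("b", list(range(9)))]:
            offsets[key] = h.tell()
            pickle.dump((key, value), h)

    def load_record(fname, offset):
        # Consumer/worker: open the file independently, seek, read one record.
        with open(fname, "rb") as f:
            f.seek(offset)
            return pickle.load(f)

    assert load_record(path, offsets["b"]) == ("b", list(range(9)))
    os.remove(path)
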
{