aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJ08nY2025-10-26 14:45:00 +0100
committerJ08nY2025-10-26 14:45:00 +0100
commitce1f28417f7cf67c1e53873b0929600ff05bf4cc (patch)
tree10e9a7268ca6cd8f85a7a12e50e7b5e8e2ca9321
parente94b083c3d71c2023043bbd20fec6c64b11abb53 (diff)
downloadECTester-ce1f28417f7cf67c1e53873b0929600ff05bf4cc.tar.gz
ECTester-ce1f28417f7cf67c1e53873b0929600ff05bf4cc.tar.zst
ECTester-ce1f28417f7cf67c1e53873b0929600ff05bf4cc.zip
-rw-r--r--analysis/scalarmults/epare/simulate.py27
-rw-r--r--analysis/scalarmults/simulate.ipynb3
-rw-r--r--analysis/scalarmults/standalone/README.md13
-rw-r--r--analysis/scalarmults/standalone/make_multiples.py71
-rw-r--r--analysis/scalarmults/standalone/make_probs.py78
-rw-r--r--analysis/scalarmults/standalone/merge_probs.py36
-rw-r--r--requirements.txt1
7 files changed, 227 insertions, 2 deletions
diff --git a/analysis/scalarmults/epare/simulate.py b/analysis/scalarmults/epare/simulate.py
index 7fa094e..e4251a6 100644
--- a/analysis/scalarmults/epare/simulate.py
+++ b/analysis/scalarmults/epare/simulate.py
@@ -14,6 +14,12 @@ from pyecsca.sca.re.rpa import multiple_graph
from pyecsca.sca.re.epa import graph_to_check_inputs, evaluate_checks
+if sys.version_info >= (3, 14):
+ from compression import zstd
+else:
+ from backports import zstd
+
+
def simulate_multiples(
mult: Config,
params: DomainParameters,
@@ -127,3 +133,24 @@ def evaluate_multiples_direct(
f.seek(offset)
_, res = pickle.load(f)
return evaluate_multiples(mult, res, divisors, use_init, use_multiply)
+
+
+def evaluate_multiples_compressed(
+    mult: Config,
+    fname: str,
+    offset: int,
+    divisors: set[int],
+    use_init: bool = True,
+    use_multiply: bool = True,
+):
+    """
+    Like `evaluate_multiples`, but reads the MultResults from the zstd-compressed
+    file named `fname`, seeking to `offset` in the *decompressed* stream before
+    unpickling. Still returns the ProbMap, which is significantly smaller and easier
+    to pickle than the MultResults.
+    """
+    # NOTE(review): seeking forward in a ZstdFile decompresses everything up to
+    # `offset` — acceptable here since workers each start from a known offset,
+    # but confirm this is not a bottleneck for large multiples files.
+    with zstd.open(fname, "rb") as f:
+        f.seek(offset)
+        _, res = pickle.load(f)
+        return evaluate_multiples(mult, res, divisors, use_init, use_multiply)
+
diff --git a/analysis/scalarmults/simulate.ipynb b/analysis/scalarmults/simulate.ipynb
index 07f0e8f..d79e447 100644
--- a/analysis/scalarmults/simulate.ipynb
+++ b/analysis/scalarmults/simulate.ipynb
@@ -207,8 +207,7 @@
" #print(f\"Got {mult}.\")\n",
" if error := future.exception():\n",
" print(\"Error!\", error)\n",
- " break\n",
- " #continue\n",
+ " continue\n",
" fpath = future.result()\n",
" with fpath.open(\"rb\") as f:\n",
" h.write(f.read())\n",
diff --git a/analysis/scalarmults/standalone/README.md b/analysis/scalarmults/standalone/README.md
new file mode 100644
index 0000000..f34187e
--- /dev/null
+++ b/analysis/scalarmults/standalone/README.md
@@ -0,0 +1,13 @@
+The pipeline, step by step:
+
+Pick a random seed (hex)
+
+make_multiples (one 70GB file), one job
+ -> multiples_<seed>.zpickle
+
+make_probs (one small file), one job per multiples file
+ -> probs_<seed>.pickle
+
+merge_probs (one small file), one job for all
+ -> merged.pickle
+
diff --git a/analysis/scalarmults/standalone/make_multiples.py b/analysis/scalarmults/standalone/make_multiples.py
new file mode 100644
index 0000000..d922616
--- /dev/null
+++ b/analysis/scalarmults/standalone/make_multiples.py
@@ -0,0 +1,71 @@
+"""
+Make a multiples file for all configs (without error models).
+
+The file is rather large as it stores the full multiple graphs
+as pickles for a number of samples for each config. For now this is
+around 300 GB for samples = 1000.
+"""
+import atexit
+import pickle
+import sys
+import time
+
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+import click
+
+from tqdm import tqdm
+
+from pyecsca.ec.params import get_params
+from pyecsca.misc.utils import TaskExecutor
+from epare import all_configs, simulate_multiples_direct
+
+
+if sys.version_info >= (3, 14):
+ from compression import zstd
+else:
+ from backports import zstd
+
+
+@click.command()
+@click.option("temp", "--temp", envvar="SCRATCHDIR", type=click.Path(file_okay=False, dir_okay=True, path_type=Path), default=None)
+@click.option("workers", "--workers", type=int, required=True)
+@click.option("seed", "--seed", required=True)
+@click.option("samples", "--samples", type=int, default=100)
+def main(temp, workers, seed, samples):
+ category = "secg"
+ curve = "secp256r1"
+ params = get_params(category, curve, "projective")
+ bits = params.order.bit_length()
+
+ if temp is None:
+ tmp = TemporaryDirectory()
+ temp = Path(tmp)
+ atexit.register(tmp.close)
+
+ output = f"multiples_{seed}.zpickle"
+
+ with TaskExecutor(max_workers=workers) as pool:
+ for i, mult in enumerate(all_configs):
+ pool.submit_task(mult,
+ simulate_multiples_direct,
+ mult, params, bits, temp / f"m{seed}_{i}.pickle", samples, seed=seed)
+ with zstd.open(output, "wb") as h:
+ # TODO: This may not read the results fast enough and the scratchdir may fill up if it is not capable of storing enough temps
+ i = 0
+ for mult, future in tqdm(pool.as_completed(), desc="Computing multiple graphs.", total=len(pool.tasks)):
+ i += 1
+ if error := future.exception():
+ print("Error!", mult, error)
+ continue
+ fpath = future.result()
+ with fpath.open("rb") as f:
+ h.write(f.read())
+ fpath.unlink()
+ if (i % 100) == 0:
+ time.sleep(5)
+
+
+if __name__ == "__main__":
+ main() \ No newline at end of file
diff --git a/analysis/scalarmults/standalone/make_probs.py b/analysis/scalarmults/standalone/make_probs.py
new file mode 100644
index 0000000..9228180
--- /dev/null
+++ b/analysis/scalarmults/standalone/make_probs.py
@@ -0,0 +1,78 @@
+"""
+Make the probs file from a given multiples file.
+"""
+import atexit
+import pickle
+import sys
+
+from pathlib import Path
+
+import click
+
+from tqdm import tqdm
+
+from pyecsca.ec.params import get_params
+from pyecsca.misc.utils import TaskExecutor
+from epare import all_error_models, evaluate_multiples_compressed, divisor_map
+
+
+if sys.version_info >= (3, 14):
+ from compression import zstd
+else:
+ from backports import zstd
+
+
+@click.command()
+@click.option("temp", "--temp", envvar="SCRATCHDIR", type=click.Path(file_okay=False, dir_okay=True, path_type=Path), default=None)
+@click.option("workers", "--workers", type=int, required=True)
+@click.option("seed", "--seed", required=True)
+def main(temp, workers, seed):
+ category = "secg"
+ curve = "secp256r1"
+ params = get_params(category, curve, "projective")
+ bits = params.order.bit_length()
+
+ if temp is None:
+ tmp = TemporaryDirectory()
+ temp = Path(tmp)
+ atexit.register(tmp.close)
+
+ use_init = True
+ use_multiply = True
+
+ in_path = Path(f"multiples_{seed}.zpickle")
+ out_path = Path(f"probs_{seed}.pickle")
+
+ with zstd.open(in_path, "rb") as f, out_path.open("wb") as h:
+ while True:
+ try:
+ start = f.tell()
+ mult, vals = pickle.load(f)
+ for error_model in all_error_models:
+ full = mult.with_error_model(error_model)
+ # Pass the file name and offset to speed up computation start.
+ pool.submit_task(full,
+ evaluate_multiples_compressed,
+ full, in_path, start, divisor_map["all"], use_init, use_multiply)
+ gc.collect()
+ for full, future in pool.as_completed(wait=False):
+ if error := future.exception():
+ print("Error!", full, error)
+ continue
+ res = future.result()
+ pickle.dump((full, res), h)
+ except EOFError:
+ break
+ except pickle.UnpicklingError:
+ print("Bad unpickling, the multiples file is likely truncated.")
+ break
+ for full, future in pool.as_completed():
+ if error := future.exception():
+ print("Error!", full, error)
+ continue
+ res = future.result()
+ pickle.dump((full, res), h)
+
+
+if __name__ == "__main__":
+ pass \ No newline at end of file
diff --git a/analysis/scalarmults/standalone/merge_probs.py b/analysis/scalarmults/standalone/merge_probs.py
new file mode 100644
index 0000000..0193382
--- /dev/null
+++ b/analysis/scalarmults/standalone/merge_probs.py
@@ -0,0 +1,36 @@
+"""
+Merge all probs files into one.
+"""
+
+import pickle
+
+import click
+
+from pathlib import Path
+
+from epare import ProbMap
+
+
+@click.command()
+def main():
+ maps = {}
+ for file in Path().glob("probs_*.pickle"):
+ with file.open("rb") as h:
+ while True:
+ try:
+ full, prob_map = pickle.load(h)
+ if full not in maps:
+ maps[full] = prob_map
+ else:
+ maps[full].merge(prob_map)
+ except EOFError:
+ break
+ except pickle.UnpicklingError:
+ print(f"Bad unpickling, the probs file {file} is likely truncated.")
+ break
+ with open("merged.pickle", "wb") as f:
+ pickle.dump(maps, f)
+
+
+if __name__ == "__main__":
+ main() \ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 595b20a..aec46df 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,3 +9,4 @@ tqdm
statsmodels
anytree
scikit-learn
+backports.zstd ; python_version<'3.14'