author      J08nY                     2025-08-05 20:07:46 +0200
committer   J08nY                     2025-08-05 20:07:46 +0200
commit      e6e2cf8b31996c8dc42eae20c5afad40ba382c38 (patch)
tree        2537e1d7b8f30f27130e91ef9f2680e9bdf7587b
parent      12dbfa627fecb46bca3e7cff001f3881ece6bdb9 (diff)
download    ECTester-e6e2cf8b31996c8dc42eae20c5afad40ba382c38.tar.gz
            ECTester-e6e2cf8b31996c8dc42eae20c5afad40ba382c38.tar.zst
            ECTester-e6e2cf8b31996c8dc42eae20c5afad40ba382c38.zip
Make visualize and distinguish notebooks kind of work with the new multidents.
-rw-r--r--  analysis/scalarmults/common.py            9
-rw-r--r--  analysis/scalarmults/distinguish.ipynb   72
-rw-r--r--  analysis/scalarmults/simulate.ipynb     587
-rw-r--r--  analysis/scalarmults/visualize.ipynb    113
-rw-r--r--  requirements.txt                          1
5 files changed, 204 insertions, 578 deletions
diff --git a/analysis/scalarmults/common.py b/analysis/scalarmults/common.py
index f1299dd..d3a784c 100644
--- a/analysis/scalarmults/common.py
+++ b/analysis/scalarmults/common.py
@@ -1,4 +1,5 @@
import itertools
+import hashlib
from datetime import timedelta
from enum import Enum
from operator import itemgetter
@@ -249,7 +250,13 @@ class ProbMap:
return self.probs.items()
def narrow(self, divisors: set[int]):
- self.probs = {k:v for k, v in self.probs.items() if k in divisors}
+ divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()
+ if self.divisors_hash == divisors_hash:
+ # Already narrow.
+ return
+ for kdel in set(self.probs.keys()).difference(divisors):
+ del self.probs[kdel]
+ self.divisors_hash = divisors_hash
def merge(self, other: "ProbMap") -> None:
if self.divisors_hash != other.divisors_hash:
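Note: the new narrow() above deletes keys in place and caches a BLAKE2b digest of the sorted divisor set, so narrowing the same ProbMap to the same set again becomes a no-op. A minimal standalone sketch of that caching idea, with a simplified ProbMap that keeps only probs and the cached digest (the real class carries more state, e.g. the samples count and the merge() seen in the next hunk):

    import hashlib

    class ProbMap:
        """Simplified stand-in: only the fields needed to show the narrowing cache."""

        def __init__(self, probs: dict[int, float]):
            self.probs = probs
            self.divisors_hash = None  # digest of the divisor set we last narrowed to

        def narrow(self, divisors: set[int]) -> None:
            # Hash the sorted divisor set; if it matches the cached digest,
            # the map was already narrowed to this set and nothing needs doing.
            divisors_hash = hashlib.blake2b(str(sorted(divisors)).encode(), digest_size=8).digest()
            if self.divisors_hash == divisors_hash:
                return
            # Drop keys in place instead of rebuilding the whole dict.
            for kdel in set(self.probs.keys()).difference(divisors):
                del self.probs[kdel]
            self.divisors_hash = divisors_hash

    pm = ProbMap({2: 0.5, 3: 0.25, 7: 0.125})
    pm.narrow({2, 3})  # drops 7
    pm.narrow({2, 3})  # no-op: cached digest matches
    print(pm.probs)    # {2: 0.5, 3: 0.25}

The saving matters because the distinguish notebook (next diff) now narrows every loaded probability map in a loop.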
diff --git a/analysis/scalarmults/distinguish.ipynb b/analysis/scalarmults/distinguish.ipynb
index fdb3f6a..4165d87 100644
--- a/analysis/scalarmults/distinguish.ipynb
+++ b/analysis/scalarmults/distinguish.ipynb
@@ -41,6 +41,23 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "119b1e18-eeb0-4b70-bec3-ff277641403f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def silence():\n",
+ " import warnings\n",
+ " warnings.filterwarnings(\n",
+ " \"ignore\",\n",
+ " message=\"The number of unique classes is greater than 50% of the number of samples.*\",\n",
+ " category=UserWarning\n",
+ " )\n",
+ "silence()"
+ ]
+ },
+ {
"cell_type": "markdown",
"id": "4868c083-8073-453d-b508-704fcb6d6f2a",
"metadata": {},
@@ -57,7 +74,6 @@
"outputs": [],
"source": [
"divisor_name = \"all\"\n",
- "kind = \"all\"\n",
"allfeats = list(filter(lambda feat: feat not in (1,2,3,4,5), divisor_map[divisor_name]))"
]
},
@@ -69,14 +85,10 @@
"outputs": [],
"source": [
"# Load\n",
- "try:\n",
- " with open(f\"{divisor_name}_{kind}_distrs.pickle\", \"rb\") as f:\n",
- " distributions_mults = pickle.load(f)\n",
- "except FileNotFoundError:\n",
- " with open(f\"all_{kind}_distrs.pickle\", \"rb\") as f:\n",
- " distributions_mults = pickle.load(f)\n",
- " for probmap in distributions_mults.values():\n",
- " probmap.narrow(allfeats)"
+ "with open(f\"merged.pickle\", \"rb\") as f:\n",
+ " distributions_mults = pickle.load(f)\n",
+ "for probmap in tqdm(distributions_mults.values()):\n",
+ " probmap.narrow(allfeats)"
]
},
{
@@ -127,7 +139,7 @@
"source": [
"# Now go over all divisors, cluster based on overlapping CI for given n?\n",
"io_map = {mult:{} for mult in allmults}\n",
- "for divisor in allfeats:\n",
+ "for divisor in tqdm(allfeats):\n",
" prev_ci_low = None\n",
" prev_ci_high = None\n",
" groups = {}\n",
@@ -250,7 +262,7 @@
"source": [
"simulations = 1000\n",
"\n",
- "for nattack in trange(100, 10000, 100):\n",
+ "for nattack in trange(100, 10100, 100):\n",
" successes = 0\n",
" pathiness = 0\n",
" for i in range(simulations):\n",
@@ -450,8 +462,19 @@
" scorer):\n",
" classifier.set_params(nattack=nattack)\n",
" classifier.fit(X, y)\n",
+ " #print(\"Make instance\")\n",
+ " #print(X)\n",
+ " #print(y)\n",
" X_samp, y_samp = make_instance(nattack, simulations, X, y)\n",
- " return scorer(classifier, X_samp, y_samp)\n",
+ " #print(\"Here!\")\n",
+ " #print(X_samp)\n",
+ " #print(y_samp)\n",
+ " #print(X_samp.shape, y_samp.shape)\n",
+ " #print(classifier.X_.shape, classifier.y_.shape)\n",
+ " #print(scorer)\n",
+ " score = scorer(classifier, X_samp, y_samp)\n",
+ " #print(score)\n",
+ " return score\n",
"\n",
"\n",
"def average_rank_score(y_true, y_pred, labels=None):\n",
@@ -514,6 +537,7 @@
" top_k_accuracy_score,\n",
" greater_is_better=True,\n",
" response_method=(\"decision_function\", \"predict_proba\"),\n",
+ " labels=np.arange(nmults)\n",
")\n",
"\n",
"#accuracy_scorer.__str__ = lambda self: \"Accuracy\"\n",
@@ -522,7 +546,8 @@
" top_k_accuracy_score,\n",
" greater_is_better=True,\n",
" response_method=(\"decision_function\", \"predict_proba\"),\n",
- " k=5\n",
+ " k=5,\n",
+ " labels=np.arange(nmults)\n",
")\n",
"\n",
"#top_5_scorer.__str__ = lambda self: \"Top-5 accuracy\"\n",
@@ -531,7 +556,8 @@
" top_k_accuracy_score,\n",
" greater_is_better=True,\n",
" response_method=(\"decision_function\", \"predict_proba\"),\n",
- " k=10\n",
+ " k=10,\n",
+ " labels=np.arange(nmults)\n",
")\n",
"\n",
"#top_10_scorer.__str__ = lambda self: \"Top-10 accuracy\"\n",
@@ -540,6 +566,7 @@
" average_rank_score,\n",
" greater_is_better=False,\n",
" response_method=(\"decision_function\", \"predict_proba\"),\n",
+ " labels=np.arange(nmults)\n",
")\n",
"\n",
"#avg_rank_scorer.__str__ = lambda self: \"Average rank\""
@@ -581,7 +608,6 @@
" self.scorer = scorer\n",
"\n",
"class RandomFeatures(FeaturesByClassification):\n",
- "\n",
" def __init__(self,\n",
" allfeats: list[int],\n",
" mults: dict[MultIdent, ProbMap],\n",
@@ -607,7 +633,7 @@
" return startwith\n",
"\n",
" def select(self, nfeats: int, startwith: list[int] = None) -> tuple[list[int], float]:\n",
- " with TaskExecutor(max_workers=self.num_workers) as pool:\n",
+ " with TaskExecutor(max_workers=self.num_workers, initializer=silence) as pool:\n",
" feat_map = []\n",
" for i in range(self.retries):\n",
" feats = self._select_random(nfeats, startwith)\n",
@@ -638,7 +664,7 @@
" raise ValueError(\"No features to select.\")\n",
" available_feats = list(filter(lambda feat: feat not in startwith, self.allfeats))\n",
" current = list(startwith)\n",
- " with TaskExecutor(max_workers=self.num_workers) as pool:\n",
+ " with TaskExecutor(max_workers=self.num_workers, initializer=silence) as pool:\n",
" while toselect > 0:\n",
" for feat in available_feats:\n",
" feats = current + [feat]\n",
@@ -684,7 +710,7 @@
"source": [
"def plot_performance(classifier, scorer, simulations, feature_map, mults, num_workers = 30):\n",
" scores = {}\n",
- " with TaskExecutor(max_workers=num_workers) as pool:\n",
+ " with TaskExecutor(max_workers=num_workers, initializer=silence) as pool:\n",
" for (nattack, nfeats), feats in feature_map.items():\n",
" X, y = to_sklearn(mults, feats)\n",
" pool.submit_task((nattack, nfeats),\n",
@@ -755,11 +781,11 @@
"metadata": {},
"outputs": [],
"source": [
- "simulations = 500\n",
+ "simulations = 15000\n",
"retries = 500\n",
"nattack = range(50, 350, 50)\n",
"nfeats = range(1, 11)\n",
- "num_workers = 30\n",
+ "num_workers = 20\n",
"\n",
"euclid_classifier = EuclidClassifier()\n",
"tree_random_subsets = RandomFeatures(sorted(feats_in_tree), distributions_mults, num_workers,\n",
@@ -1191,7 +1217,7 @@
"def compute_redundancy(feats: list[int], priors, probs, nattack: int, num_workers: int = 30):\n",
" nallfeats = len(feats)\n",
" redundancy = np.zeros((nallfeats, nallfeats), dtype=np.float64)\n",
- " with TaskExecutor(max_workers=num_workers) as pool:\n",
+ " with TaskExecutor(max_workers=num_workers, initializer=silence) as pool:\n",
" for i in trange(nallfeats):\n",
" for j in range(nallfeats):\n",
" if i < j:\n",
@@ -1480,7 +1506,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "c5598117-4c54-4721-9fc5-68432fb8e230",
+ "id": "b04fa91e-28cd-4cff-81d7-7db4359b04a1",
"metadata": {},
"outputs": [],
"source": []
@@ -1502,7 +1528,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.3"
+ "version": "3.13.5"
}
},
"nbformat": 4,
diff --git a/analysis/scalarmults/simulate.ipynb b/analysis/scalarmults/simulate.ipynb
index dae8d06..9b450b1 100644
--- a/analysis/scalarmults/simulate.ipynb
+++ b/analysis/scalarmults/simulate.ipynb
@@ -10,7 +10,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"id": "b4386513-cc14-434b-a748-2863f8657452",
"metadata": {},
"outputs": [],
@@ -62,7 +62,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"id": "3463a7bd-34d8-458b-8ceb-dddf99de21dc",
"metadata": {},
"outputs": [],
@@ -79,21 +79,10 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"id": "170c11fc-86cf-4eb1-bf4e-b2e44b2d7ac5",
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Scalar multipliers considered: 65\n",
- "Scalar multipliers (with a single countermeasure) considered: 390\n",
- "Error models considered: 32\n",
- "Total configurations considered: 12480\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"nmults = len(all_mults)\n",
"nmults_ctr = len(all_mults_with_ctr)\n",
@@ -116,7 +105,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"id": "4d5c7f10-618f-4612-b594-81d1607b0d1d",
"metadata": {},
"outputs": [],
@@ -135,7 +124,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"id": "07bc266d-35eb-4f6d-bdba-e9f6f66827f1",
"metadata": {},
"outputs": [],
@@ -166,7 +155,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"id": "6f3e9ffa-19e8-46b2-a6ad-1d318d5c8e17",
"metadata": {},
"outputs": [],
@@ -201,7 +190,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
"id": "64799c16-8113-4eff-81de-6a3e547eb5c5",
"metadata": {},
"outputs": [],
@@ -228,7 +217,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
"id": "ac630a08-4120-41cf-b3bb-1827ef469542",
"metadata": {},
"outputs": [],
@@ -267,421 +256,10 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": null,
"id": "84359084-4116-436c-92cd-d43fdfeca842",
"metadata": {},
- "outputs": [
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "b7c686d503d14bb69250747a881fe3f4",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Computing multiple graphs.: 0%| | 0/390 [00:00<?, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Got SlidingWindow_w:6,recode:LTR.\n",
- "Got SlidingWindow_w:5,recode:LTR+bt.\n",
- "Got SlidingWindow_w:4,recode:LTR.\n",
- "Got SlidingWindow_w:3,recode:LTR+bt.\n",
- "Got SlidingWindow_w:5,recode:LTR+multiplicative.\n",
- "Got SlidingWindow_w:5,recode:LTR+gsr.\n",
- "Got SlidingWindow_w:4,recode:LTR+multiplicative.\n",
- "Got SlidingWindow_w:2,recode:LTR.\n",
- "Got SlidingWindow_w:3,recode:LTR+multiplicative.\n",
- "Got SlidingWindow_w:4,recode:LTR+gsr.\n",
- "Got SlidingWindow_w:3,recode:LTR.\n",
- "Got SlidingWindow_w:2,recode:LTR+bt.\n",
- "Got SlidingWindow_w:3,recode:LTR+gsr.\n",
- "Got SlidingWindow_w:6,recode:LTR+gsr.\n",
- "Got SlidingWindow_w:5,recode:LTR.\n",
- "Got SlidingWindow_w:4,recode:LTR+bt.\n",
- "Got SlidingWindow_w:6,recode:LTR+bt.\n",
- "Got SlidingWindow_w:4,recode:LTR+euclidean.\n",
- "Got SlidingWindow_w:5,recode:LTR+euclidean.\n",
- "Got SlidingWindow_w:3,recode:LTR+euclidean.\n",
- "Got SlidingWindow_w:4,recode:LTR+additive.\n",
- "Got SlidingWindow_w:2,recode:LTR+multiplicative.\n",
- "Got SlidingWindow_w:6,recode:LTR+euclidean.\n",
- "Got SlidingWindow_w:2,recode:LTR+gsr.\n",
- "Got SlidingWindow_w:6,recode:LTR+multiplicative.\n",
- "Got SlidingWindow_w:2,recode:RTL.\n",
- "Got SlidingWindow_w:2,recode:LTR+euclidean.\n",
- "Got SlidingWindow_w:6,recode:LTR+additive.\n",
- "Got SlidingWindow_w:5,recode:LTR+additive.\n",
- "Got SlidingWindow_w:3,recode:LTR+additive.\n",
- "Got SlidingWindow_w:3,recode:RTL.\n",
- "Got SlidingWindow_w:2,recode:RTL+gsr.\n",
- "Got SlidingWindow_w:2,recode:RTL+bt.\n",
- "Got SlidingWindow_w:3,recode:RTL+multiplicative.\n",
- "Got SlidingWindow_w:3,recode:RTL+gsr.\n",
- "Got SlidingWindow_w:3,recode:RTL+bt.\n",
- "Got SlidingWindow_w:5,recode:RTL.\n",
- "Got SlidingWindow_w:4,recode:RTL.\n",
- "Got SlidingWindow_w:2,recode:LTR+additive.\n",
- "Got SlidingWindow_w:2,recode:RTL+multiplicative.\n",
- "Got SlidingWindow_w:4,recode:RTL+bt.\n",
- "Got SlidingWindow_w:4,recode:RTL+gsr.\n",
- "Got SlidingWindow_w:5,recode:RTL+gsr.\n",
- "Got SlidingWindow_w:4,recode:RTL+multiplicative.\n",
- "Got SlidingWindow_w:5,recode:RTL+multiplicative.\n",
- "Got SlidingWindow_w:3,recode:RTL+euclidean.\n",
- "Got SlidingWindow_w:6,recode:RTL.\n",
- "Got SlidingWindow_w:5,recode:RTL+bt.\n",
- "Got SlidingWindow_w:6,recode:RTL+gsr.\n",
- "Got SlidingWindow_w:2,recode:RTL+euclidean.\n",
- "Got SlidingWindow_w:5,recode:RTL+euclidean.\n",
- "Got SlidingWindow_w:4,recode:RTL+euclidean.\n",
- "Got SlidingWindow_w:3,recode:RTL+additive.\n",
- "Got SlidingWindow_w:5,recode:RTL+additive.\n",
- "Got SlidingWindow_w:6,recode:RTL+bt.\n",
- "Got SlidingWindow_w:4,recode:RTL+additive.\n",
- "Got FixedWindowLTR_m:2+gsr.\n",
- "Got SlidingWindow_w:6,recode:RTL+multiplicative.\n",
- "Got FixedWindowLTR_m:4.\n",
- "Got SlidingWindow_w:2,recode:RTL+additive.\n",
- "Got FixedWindowLTR_m:2.\n",
- "Got FixedWindowLTR_m:2+bt.\n",
- "Got SlidingWindow_w:6,recode:RTL+euclidean.\n",
- "Got FixedWindowLTR_m:8.\n",
- "Got FixedWindowLTR_m:2+multiplicative.\n",
- "Got SlidingWindow_w:6,recode:RTL+additive.\n",
- "Got FixedWindowLTR_m:4+bt.\n",
- "Got FixedWindowLTR_m:8+bt.\n",
- "Got FixedWindowLTR_m:4+gsr.\n",
- "Got FixedWindowLTR_m:4+multiplicative.\n",
- "Got FixedWindowLTR_m:16.\n",
- "Got FixedWindowLTR_m:16+gsr.\n",
- "Got FixedWindowLTR_m:8+gsr.\n",
- "Got FixedWindowLTR_m:2+euclidean.\n",
- "Got FixedWindowLTR_m:8+multiplicative.\n",
- "Got FixedWindowLTR_m:16+multiplicative.\n",
- "Got FixedWindowLTR_m:4+euclidean.\n",
- "Got FixedWindowLTR_m:16+bt.\n",
- "Got FixedWindowLTR_m:32+bt.\n",
- "Got FixedWindowLTR_m:64.\n",
- "Got FixedWindowLTR_m:32.\n",
- "Got FixedWindowLTR_m:8+euclidean.\n",
- "Got FixedWindowLTR_m:32+gsr.\n",
- "Got FixedWindowLTR_m:2+additive.\n",
- "Got FixedWindowLTR_m:32+multiplicative.\n",
- "Got FixedWindowLTR_m:64+gsr.\n",
- "Got FixedWindowLTR_m:8+additive.\n",
- "Got FixedWindowLTR_m:4+additive.\n",
- "Got FixedWindowLTR_m:16+additive.\n",
- "Got FixedWindowLTR_m:64+bt.\n",
- "Got FixedWindowLTR_m:16+euclidean.\n",
- "Got FixedWindowLTR_m:32+euclidean.\n",
- "Got FixedWindowLTR_m:64+multiplicative.\n",
- "Got WindowBooth_w:3.\n",
- "Got WindowBooth_w:2+gsr.\n",
- "Got WindowBooth_w:2+bt.\n",
- "Got WindowBooth_w:2.\n",
- "Got FixedWindowLTR_m:32+additive.\n",
- "Got WindowBooth_w:4.\n",
- "Got WindowBooth_w:3+bt.\n",
- "Got FixedWindowLTR_m:64+euclidean.\n",
- "Got WindowBooth_w:3+gsr.\n",
- "Got FixedWindowLTR_m:64+additive.\n",
- "Got WindowBooth_w:4+gsr.\n",
- "Got WindowBooth_w:5.\n",
- "Got WindowBooth_w:5+gsr.\n",
- "Got WindowBooth_w:2+multiplicative.\n",
- "Got WindowBooth_w:6.\n",
- "Got WindowBooth_w:3+multiplicative.\n",
- "Got WindowBooth_w:5+bt.\n",
- "Got WindowBooth_w:4+bt.\n",
- "Got WindowBooth_w:6+gsr.\n",
- "Got WindowBooth_w:6+bt.\n",
- "Got WindowBooth_w:2+additive.\n",
- "Got WindowNAF_w:2.\n",
- "Got WindowBooth_w:4+multiplicative.\n",
- "Got WindowBooth_w:5+multiplicative.\n",
- "Got WindowBooth_w:3+euclidean.\n",
- "Got WindowBooth_w:3+additive.\n",
- "Got WindowBooth_w:4+additive.\n",
- "Got WindowNAF_w:4.\n",
- "Got WindowBooth_w:2+euclidean.\n",
- "Got WindowNAF_w:2+gsr.\n",
- "Got WindowNAF_w:3.\n",
- "Got WindowNAF_w:2+multiplicative.\n",
- "Got WindowBooth_w:5+additive.\n",
- "Got WindowNAF_w:2+bt.\n",
- "Got WindowNAF_w:3+bt.\n",
- "Got WindowBooth_w:6+additive.\n",
- "Got WindowNAF_w:3+gsr.\n",
- "Got WindowNAF_w:3+multiplicative.\n",
- "Got WindowBooth_w:6+multiplicative.\n",
- "Got WindowNAF_w:4+gsr.\n",
- "Got WindowNAF_w:4+bt.\n",
- "Got WindowNAF_w:2+euclidean.\n",
- "Got WindowBooth_w:4+euclidean.\n",
- "Got WindowNAF_w:5.\n",
- "Got WindowNAF_w:4+multiplicative.\n",
- "Got WindowNAF_w:5+bt.\n",
- "Got WindowNAF_w:6.\n",
- "Got WindowNAF_w:6+bt.\n",
- "Got WindowNAF_w:3+euclidean.\n",
- "Got WindowNAF_w:5+gsr.\n",
- "Got WindowBooth_w:5+euclidean.\n",
- "Got WindowNAF_w:5+multiplicative.\n",
- "Got WindowBooth_w:6+euclidean.\n",
- "Got WindowNAF_w:6+gsr.\n",
- "Got WindowNAF_w:3+additive.\n",
- "Got WindowNAF_w:5+euclidean.\n",
- "Got WindowNAF_w:2+additive.\n",
- "Got BinaryNAF_always:False,dir:LTR+bt.\n",
- "Got BinaryNAF_always:False,dir:LTR.\n",
- "Got WindowNAF_w:4+euclidean.\n",
- "Got BinaryNAF_always:False,dir:LTR+gsr.\n",
- "Got WindowNAF_w:6+multiplicative.\n",
- "Got WindowNAF_w:4+additive.\n",
- "Got BinaryNAF_always:False,dir:RTL.\n",
- "Got BinaryNAF_always:False,dir:LTR+multiplicative.\n",
- "Got WindowNAF_w:6+euclidean.\n",
- "Got WindowNAF_w:5+additive.\n",
- "Got WindowNAF_w:6+additive.\n",
- "Got BinaryNAF_always:False,dir:RTL+gsr.\n",
- "Got BinaryNAF_always:False,dir:RTL+multiplicative.\n",
- "Got BinaryNAF_always:True,dir:LTR.\n",
- "Got BinaryNAF_always:False,dir:RTL+bt.\n",
- "Got BinaryNAF_always:False,dir:LTR+euclidean.\n",
- "Got BinaryNAF_always:True,dir:LTR+gsr.\n",
- "Got BinaryNAF_always:False,dir:LTR+additive.\n",
- "Got BinaryNAF_always:True,dir:LTR+multiplicative.\n",
- "Got BinaryNAF_always:True,dir:RTL.\n",
- "Got BinaryNAF_always:False,dir:RTL+euclidean.\n",
- "Got LTR_always:False,complete:True+bt.\n",
- "Got BinaryNAF_always:True,dir:LTR+bt.\n",
- "Got LTR_always:False,complete:True+gsr.\n",
- "Got LTR_always:False,complete:True.\n",
- "Got BinaryNAF_always:True,dir:RTL+multiplicative.\n",
- "Got BinaryNAF_always:True,dir:RTL+gsr.\n",
- "Got BinaryNAF_always:True,dir:LTR+euclidean.\n",
- "Got BinaryNAF_always:True,dir:RTL+bt.\n",
- "Got BinaryNAF_always:False,dir:RTL+additive.\n",
- "Got LTR_always:False,complete:False.\n",
- "Got LTR_always:True,complete:True.\n",
- "Got LTR_always:False,complete:False+gsr.\n",
- "Got LTR_always:False,complete:True+multiplicative.\n",
- "Got LTR_always:True,complete:True+gsr.\n",
- "Got BinaryNAF_always:True,dir:LTR+additive.\n",
- "Got LTR_always:False,complete:False+multiplicative.\n",
- "Got LTR_always:False,complete:False+bt.\n",
- "Got LTR_always:True,complete:True+bt.\n",
- "Got LTR_always:False,complete:True+additive.\n",
- "Got BinaryNAF_always:True,dir:RTL+euclidean.\n",
- "Got BinaryNAF_always:True,dir:RTL+additive.\n",
- "Got RTL_always:False,complete:True.\n",
- "Got LTR_always:False,complete:False+euclidean.\n",
- "Got LTR_always:True,complete:False.\n",
- "Got LTR_always:True,complete:False+gsr.\n",
- "Got LTR_always:False,complete:True+euclidean.\n",
- "Got RTL_always:False,complete:True+bt.\n",
- "Got RTL_always:False,complete:True+gsr.\n",
- "Got LTR_always:True,complete:False+bt.\n",
- "Got LTR_always:False,complete:False+additive.\n",
- "Got LTR_always:True,complete:False+multiplicative.\n",
- "Got RTL_always:False,complete:False.\n",
- "Got LTR_always:True,complete:True+additive.\n",
- "Got LTR_always:True,complete:True+multiplicative.\n",
- "Got RTL_always:False,complete:False+gsr.\n",
- "Got RTL_always:False,complete:True+multiplicative.\n",
- "Got RTL_always:True,complete:True+gsr.\n",
- "Got LTR_always:True,complete:False+euclidean.\n",
- "Got RTL_always:True,complete:True.\n",
- "Got RTL_always:True,complete:True+bt.\n",
- "Got RTL_always:False,complete:False+bt.\n",
- "Got RTL_always:False,complete:False+multiplicative.\n",
- "Got RTL_always:True,complete:False.\n",
- "Got RTL_always:False,complete:True+additive.\n",
- "Got Coron.\n",
- "Got RTL_always:True,complete:False+gsr.\n",
- "Got RTL_always:False,complete:False+euclidean.\n",
- "Got FullPrecomp_always:False,complete:True.\n",
- "Got RTL_always:True,complete:False+multiplicative.\n",
- "Got LTR_always:True,complete:False+additive.\n",
- "Got RTL_always:True,complete:False+bt.\n",
- "Got RTL_always:False,complete:False+additive.\n",
- "Got RTL_always:False,complete:True+euclidean.\n",
- "Got Coron+bt.\n",
- "Got Coron+gsr.\n",
- "Got FullPrecomp_always:False,complete:True+bt.\n",
- "Got FullPrecomp_always:False,complete:True+gsr.\n",
- "Got Coron+multiplicative.\n",
- "Got RTL_always:True,complete:False+euclidean.\n",
- "Got FullPrecomp_always:False,complete:True+additive.\n",
- "Got FullPrecomp_always:True,complete:True.\n",
- "Got FullPrecomp_always:True,complete:True+gsr.\n",
- "Got RTL_always:True,complete:True+multiplicative.\n",
- "Got RTL_always:True,complete:True+additive.\n",
- "Got FullPrecomp_always:False,complete:False.\n",
- "Got FullPrecomp_always:False,complete:False+gsr.\n",
- "Got FullPrecomp_always:False,complete:True+multiplicative.\n",
- "Got Coron+euclidean.\n",
- "Got LTR_always:True,complete:True+euclidean.\n",
- "Got FullPrecomp_always:False,complete:True+euclidean.\n",
- "Got FullPrecomp_always:False,complete:False+additive.\n",
- "Got FullPrecomp_always:True,complete:True+bt.\n",
- "Got FullPrecomp_always:False,complete:False+bt.\n",
- "Got RTL_always:True,complete:False+additive.\n",
- "Got FullPrecomp_always:True,complete:False.\n",
- "Got RTL_always:True,complete:True+euclidean.\n",
- "Got Coron+additive.\n",
- "Got FullPrecomp_always:True,complete:True+additive.\n",
- "Got FullPrecomp_always:True,complete:False+bt.\n",
- "Got FullPrecomp_always:False,complete:False+multiplicative.\n",
- "Got FullPrecomp_always:True,complete:False+gsr.\n",
- "Got SimpleLadder_complete:True.\n",
- "Got FullPrecomp_always:False,complete:False+euclidean.\n",
- "Got SimpleLadder_complete:True+gsr.\n",
- "Got FullPrecomp_always:True,complete:False+multiplicative.\n",
- "Got SimpleLadder_complete:False.\n",
- "Got Comb_w:2,always:True.\n",
- "Got SimpleLadder_complete:True+bt.\n",
- "Got FullPrecomp_always:True,complete:False+additive.\n",
- "Got SimpleLadder_complete:False+gsr.\n",
- "Got SimpleLadder_complete:False+bt.\n",
- "Got FullPrecomp_always:True,complete:True+multiplicative.\n",
- "Got SimpleLadder_complete:False+multiplicative.\n",
- "Got Comb_w:3,always:True.\n",
- "Got FullPrecomp_always:True,complete:False+euclidean.\n",
- "Got Comb_w:2,always:True+gsr.\n",
- "Got Comb_w:3,always:True+bt.\n",
- "Got Comb_w:2,always:True+bt.\n",
- "Got Comb_w:3,always:True+gsr.\n",
- "Got FullPrecomp_always:True,complete:True+euclidean.\n",
- "Got Comb_w:4,always:True+gsr.\n",
- "Got Comb_w:2,always:True+additive.\n",
- "Got Comb_w:4,always:True.\n",
- "Got SimpleLadder_complete:True+multiplicative.\n",
- "Got SimpleLadder_complete:False+euclidean.\n",
- "Got Comb_w:3,always:True+additive.\n",
- "Got SimpleLadder_complete:True+additive.\n",
- "Got Comb_w:4,always:True+bt.\n",
- "Got Comb_w:2,always:True+multiplicative.\n",
- "Got Comb_w:5,always:True.\n",
- "Got Comb_w:5,always:True+gsr.\n",
- "Got SimpleLadder_complete:False+additive.\n",
- "Got Comb_w:5,always:True+additive.\n",
- "Got Comb_w:4,always:True+additive.\n",
- "Got Comb_w:5,always:True+bt.\n",
- "Got Comb_w:6,always:True.\n",
- "Got Comb_w:6,always:True+bt.\n",
- "Got Comb_w:3,always:True+multiplicative.\n",
- "Got Comb_w:6,always:True+gsr.\n",
- "Got Comb_w:4,always:True+multiplicative.\n",
- "Got Comb_w:2,always:False.\n",
- "Got Comb_w:6,always:True+additive.\n",
- "Got Comb_w:3,always:True+euclidean.\n",
- "Got SimpleLadder_complete:True+euclidean.\n",
- "Got Comb_w:3,always:False.\n",
- "Got Comb_w:2,always:False+bt.\n",
- "Got Comb_w:2,always:False+gsr.\n",
- "Got Comb_w:3,always:False+gsr.\n",
- "Got Comb_w:2,always:True+euclidean.\n",
- "Got Comb_w:3,always:False+bt.\n",
- "Got Comb_w:5,always:True+multiplicative.\n",
- "Got Comb_w:4,always:False.\n",
- "Got Comb_w:3,always:False+additive.\n",
- "Got Comb_w:4,always:True+euclidean.\n",
- "Got Comb_w:4,always:False+gsr.\n",
- "Got Comb_w:5,always:True+euclidean.\n",
- "Got Comb_w:2,always:False+additive.\n",
- "Got Comb_w:4,always:False+bt.\n",
- "Got Comb_w:4,always:False+additive.\n",
- "Got Comb_w:6,always:True+multiplicative.\n",
- "Got Comb_w:6,always:True+euclidean.\n",
- "Got Comb_w:5,always:False+gsr.\n",
- "Got Comb_w:5,always:False.\n",
- "Got Comb_w:3,always:False+multiplicative.\n",
- "Got Comb_w:2,always:False+multiplicative.\n",
- "Got Comb_w:5,always:False+additive.\n",
- "Got Comb_w:4,always:False+multiplicative.\n",
- "Got Comb_w:5,always:False+bt.\n",
- "Got BGMW_w:2,dir:LTR.\n",
- "Got Comb_w:2,always:False+euclidean.\n",
- "Got Comb_w:3,always:False+euclidean.\n",
- "Got Comb_w:6,always:False+bt.\n",
- "Got Comb_w:6,always:False.\n",
- "Got Comb_w:6,always:False+additive.\n",
- "Got BGMW_w:3,dir:LTR+gsr.\n",
- "Got BGMW_w:2,dir:LTR+bt.\n",
- "Got Comb_w:6,always:False+gsr.\n",
- "Got BGMW_w:3,dir:LTR.\n",
- "Got Comb_w:4,always:False+euclidean.\n",
- "Got BGMW_w:2,dir:LTR+gsr.\n",
- "Got BGMW_w:2,dir:LTR+additive.\n",
- "Got BGMW_w:3,dir:LTR+bt.\n",
- "Got Comb_w:5,always:False+multiplicative.\n",
- "Got BGMW_w:3,dir:LTR+additive.\n",
- "Got BGMW_w:4,dir:LTR.\n",
- "Got BGMW_w:4,dir:LTR+gsr.\n",
- "Got BGMW_w:4,dir:LTR+bt.\n",
- "Got BGMW_w:5,dir:LTR+gsr.\n",
- "Got BGMW_w:4,dir:LTR+additive.\n",
- "Got BGMW_w:2,dir:LTR+multiplicative.\n",
- "Got BGMW_w:5,dir:LTR.\n",
- "Got BGMW_w:5,dir:LTR+bt.\n",
- "Got BGMW_w:3,dir:LTR+multiplicative.\n",
- "Got BGMW_w:3,dir:LTR+euclidean.\n",
- "Got Comb_w:5,always:False+euclidean.\n",
- "Got BGMW_w:2,dir:LTR+euclidean.\n",
- "Got BGMW_w:6,dir:LTR.\n",
- "Got BGMW_w:5,dir:LTR+additive.\n",
- "Got BGMW_w:2,dir:RTL.\n",
- "Got BGMW_w:4,dir:LTR+multiplicative.\n",
- "Got BGMW_w:6,dir:LTR+gsr.\n",
- "Got BGMW_w:2,dir:RTL+gsr.\n",
- "Got BGMW_w:6,dir:LTR+bt.\n",
- "Got BGMW_w:6,dir:LTR+additive.\n",
- "Got Comb_w:6,always:False+multiplicative.\n",
- "Got Comb_w:6,always:False+euclidean.\n",
- "Got BGMW_w:2,dir:RTL+bt.\n",
- "Got BGMW_w:2,dir:RTL+additive.\n",
- "Got BGMW_w:3,dir:RTL.\n",
- "Got BGMW_w:5,dir:LTR+multiplicative.\n",
- "Got BGMW_w:3,dir:RTL+bt.\n",
- "Got BGMW_w:4,dir:LTR+euclidean.\n",
- "Got BGMW_w:3,dir:RTL+gsr.\n",
- "Got BGMW_w:4,dir:RTL.\n",
- "Got BGMW_w:6,dir:LTR+multiplicative.\n",
- "Got BGMW_w:2,dir:RTL+multiplicative.\n",
- "Got BGMW_w:4,dir:RTL+gsr.\n",
- "Got BGMW_w:5,dir:LTR+euclidean.\n",
- "Got BGMW_w:4,dir:RTL+bt.\n",
- "Got BGMW_w:5,dir:RTL.\n",
- "Got BGMW_w:2,dir:RTL+euclidean.\n",
- "Got BGMW_w:5,dir:RTL+gsr.\n",
- "Got BGMW_w:5,dir:RTL+bt.\n",
- "Got BGMW_w:4,dir:RTL+additive.\n",
- "Got BGMW_w:6,dir:LTR+euclidean.\n",
- "Got BGMW_w:3,dir:RTL+additive.\n",
- "Got BGMW_w:6,dir:RTL.\n",
- "Got BGMW_w:3,dir:RTL+euclidean.\n",
- "Got BGMW_w:5,dir:RTL+additive.\n",
- "Got BGMW_w:4,dir:RTL+euclidean.\n",
- "Got BGMW_w:3,dir:RTL+multiplicative.\n",
- "Got BGMW_w:6,dir:RTL+gsr.\n",
- "Got BGMW_w:4,dir:RTL+multiplicative.\n",
- "Got BGMW_w:6,dir:RTL+bt.\n",
- "Got BGMW_w:6,dir:RTL+additive.\n",
- "Got BGMW_w:5,dir:RTL+euclidean.\n",
- "Got BGMW_w:5,dir:RTL+multiplicative.\n",
- "Got BGMW_w:6,dir:RTL+multiplicative.\n",
- "Got BGMW_w:6,dir:RTL+euclidean.\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"chunk_id = randbytes(4).hex()\n",
"with TaskExecutor(max_workers=num_workers, initializer=silence) as pool, tempfile.TemporaryDirectory() as tmp_dirname:\n",
@@ -715,69 +293,15 @@
"execution_count": null,
"id": "fbab8333-b8f1-4890-b38a-7bb34f5ffb02",
"metadata": {},
- "outputs": [
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "e682673036ab4053a6a2a5fc60194691",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Processing chunks: 0%| | 0/1 [00:00<?, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Processing chunk 66897617, no probmaps found.\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "5d0ac8b447b942f1bb6e31ee9efc9694",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Loading chunk 66897617.: 0%| | 0/390 [00:00<?, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "1dfe709e6f37433aaa4dd6d0c4823373",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Processing 66897617.: 0%| | 0/12480 [00:00<?, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
+ "outputs": [],
"source": [
"with TaskExecutor(max_workers=num_workers, initializer=silence) as pool:\n",
" for in_fname in tqdm(glob.glob(f\"multiples_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
" \n",
" match = re.match(\"multiples_(?P<bits>[0-9]+)_(?P<init>(?:no)?init)_(?P<mult>(?:no)?mult)_chunk(?P<id>[0-9a-f]+).pickle\", in_fname)\n",
- " bits = match.group(\"bits\")\n",
- " use_init = match.group(\"init\")\n",
- " use_multiply = match.group(\"mult\")\n",
" chunk_id = match.group(\"id\")\n",
" out_fname = f\"probs_{bits}_{use_init}_{use_multiply}_chunk{chunk_id}.pickle\"\n",
"\n",
- "\n",
" in_file = Path(in_fname)\n",
" out_file = Path(out_fname)\n",
"\n",
@@ -847,6 +371,59 @@
},
{
"cell_type": "markdown",
+ "id": "a1bb9459-aa2b-4a07-970e-6e981ab3f97e",
+ "metadata": {},
+ "source": [
+ "## Merge"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f8daeda-f289-42c0-9157-19c0f688c6aa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "probmaps = {}\n",
+ "for in_fname in tqdm(glob.glob(f\"probs_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk*.pickle\"), desc=\"Processing chunks\", smoothing=0):\n",
+ " \n",
+ " match = re.match(\"probs_(?P<bits>[0-9]+)_(?P<init>(?:no)?init)_(?P<mult>(?:no)?mult)_chunk(?P<id>[0-9a-f]+).pickle\", in_fname)\n",
+ " chunk_id = match.group(\"id\") \n",
+ " \n",
+ " with open(in_fname, \"rb\") as f:\n",
+ " loading_bar = tqdm(total=ncfgs, desc=f\"Loading chunk {chunk_id}.\", smoothing=0)\n",
+ " while True:\n",
+ " try:\n",
+ " mult, probmap = pickle.load(f)\n",
+ " if mult in probmaps:\n",
+ " current = probmaps[mult]\n",
+ " current.merge(probmap)\n",
+ " else:\n",
+ " probmaps[mult] = probmap\n",
+ " loading_bar.update(1)\n",
+ " except EOFError:\n",
+ " break\n",
+ " except pickle.UnpicklingError:\n",
+ " print(\"Bad unpickling, the probs file is likely truncated.\")\n",
+ " break\n",
+ " loading_bar.close()\n",
+ "\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6bc15256-8228-4498-9bbf-73e76eea3c70",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(\"merged.pickle\", \"wb\") as f:\n",
+ " pickle.dump(probmaps, f)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
"id": "228922dc-67bf-481a-9f08-4084695e2059",
"metadata": {},
"source": [
@@ -864,38 +441,18 @@
"mult = next(iter(multiples_mults))\n",
"res = multiples_mults[mult]\n",
"\n",
- "\n",
"for checks in powerset(checks_add):\n",
" for precomp_to_affine in (True, False):\n",
" for check_condition in (\"all\", \"necessary\"):\n",
" error_model = ErrorModel(checks, check_condition=check_condition, precomp_to_affine=precomp_to_affine)\n",
" full = mult.with_error_model(error_model)\n",
" print(full)\n",
- " #with PyProfiler() as prof:\n",
- " probmap = evaluate_multiples(full, res, divisor_map[\"all\"])\n",
- " #print(prof.output_text(unicode=True, color=True))\n",
- " #print(probmap)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3d291832-b0c7-4c3a-9989-22079e4e0f53",
- "metadata": {},
- "outputs": [],
- "source": [
- "multiples_mults = {}\n",
- "for fname in glob.glob(f\"multiples_{bits}_{'init' if use_init else 'noinit'}_{'mult' if use_multiply else 'nomult'}_chunk*.pickle\"):\n",
- " with open(fname, \"rb\") as f:\n",
- " while True:\n",
- " try:\n",
- " mult, vals = pickle.load(f)\n",
- " if mult not in multiples_mults:\n",
- " multiples_mults[mult] = vals\n",
- " else:\n",
- " multiples_mults[mult].merge(vals)\n",
- " except EOFError:\n",
- " break"
+ " with PyProfiler() as prof:\n",
+ " probmap = evaluate_multiples(full, res, divisor_map[\"all\"])\n",
+ " print(prof.output_text(unicode=True, color=True))\n",
+ " break\n",
+ " break\n",
+ " break"
]
},
{
@@ -923,7 +480,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.3"
+ "version": "3.13.5"
}
},
"nbformat": 4,
diff --git a/analysis/scalarmults/visualize.ipynb b/analysis/scalarmults/visualize.ipynb
index 709e566..d2d6b0f 100644
--- a/analysis/scalarmults/visualize.ipynb
+++ b/analysis/scalarmults/visualize.ipynb
@@ -19,6 +19,7 @@
"import itertools\n",
"import glob\n",
"import gc\n",
+ "import random\n",
"\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
@@ -126,9 +127,9 @@
"source": [
"selected_mults = all_mults\n",
"divisor_name = \"all\"\n",
- "kind = \"precomp+necessary\"\n",
"showci = False\n",
- "selected_divisors = divisor_map[divisor_name]"
+ "selected_divisors = divisor_map[divisor_name]\n",
+ "show_error_model = ErrorModel({}, \"all\", True)"
]
},
{
@@ -138,8 +139,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# Optionally, load\n",
- "with open(f\"{divisor_name}_{kind}_distrs.pickle\", \"rb\") as f:\n",
+ "with open(f\"merged.pickle\", \"rb\") as f:\n",
" distributions_mults = pickle.load(f)"
]
},
@@ -152,21 +152,6 @@
]
},
{
- "cell_type": "code",
- "execution_count": null,
- "id": "5ccc28f6-3994-4a0d-8639-2f6df4dddd26",
- "metadata": {},
- "outputs": [],
- "source": [
- "for mult, probmap in distributions_mults.items():\n",
- " for divisor in sorted(divisor_map[divisor_name]):\n",
- " if divisor not in probmap.probs:\n",
- " print(f\"Missing {mult}, {divisor}\")\n",
- " if probmap.kind is not None and probmap.kind != kind:\n",
- " print(\"Bad kind! Did you forget to load?\")"
- ]
- },
- {
"cell_type": "markdown",
"id": "9b6f169b-07b3-4b27-ba36-8b90418cd072",
"metadata": {},
@@ -183,7 +168,7 @@
"outputs": [],
"source": [
"for divisor_name in divisor_map:\n",
- " plot_mults = list(filter(lambda mult: mult in distributions_mults and mult.klass not in (CombMultiplier, BGMWMultiplier), all_mults_with_ctr))\n",
+ " plot_mults = list(filter(lambda mult: mult in distributions_mults and mult.klass not in (CombMultiplier, BGMWMultiplier), distributions_mults))\n",
" print(divisor_name, \"nocomb\")\n",
" plot_divisors = sorted(divisor_map[divisor_name])\n",
" L = len(plot_divisors)\n",
@@ -196,14 +181,15 @@
" vals = np.zeros((N, L))\n",
" n_samples = 0\n",
" for i, mult in enumerate(plot_mults):\n",
+ " clear_mult = mult.with_error_model(None)\n",
" probmap = distributions_mults[mult]\n",
" y_values = [probmap[l] for l in plot_divisors]\n",
" vals[i,] = y_values\n",
" ax.plot(x, y_values,\n",
- " color=colors[mult],\n",
- " linestyle=styles[mult],\n",
- " marker=markers[mult],\n",
- " label=str(mult) if mult.countermeasure is None else \"_nolegend_\")\n",
+ " color=colors[clear_mult],\n",
+ " linestyle=styles[clear_mult],\n",
+ " marker=markers[clear_mult],\n",
+ " label=str(mult) if mult.countermeasure is None and mult.error_model == show_error_model else \"_nolegend_\")\n",
" if showci:\n",
" cis = [conf_interval(p, probmap.samples) for p in y_values]\n",
" ci_low = [ci[0] for ci in cis]\n",
@@ -211,7 +197,7 @@
" ax.fill_between(x, ci_low, ci_high, color=\"black\", alpha=0.1)\n",
" n_samples += probmap.samples\n",
" \n",
- " ax.set_title(f\"{divisor_name} ({kind})\\nSamples: \" + str(n_samples//N))\n",
+ " ax.set_title(f\"{divisor_name}\\nSamples: \" + str(n_samples//N))\n",
" \n",
" #var = np.var(vals, axis=0)\n",
" #ax.plot(x, var / np.max(var), label=\"cross-mult variance (normalized)\", ls=\"--\", lw=2, color=\"black\")\n",
@@ -231,7 +217,7 @@
" \n",
" ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n",
"\n",
- " fig.savefig(f\"graphs/{kind}-kind/{divisor_name}-nocomb{'+ci' if showci else ''}.pdf\");\n",
+ " fig.savefig(f\"graphs/{divisor_name}-nocomb{'+ci' if showci else ''}.pdf\");\n",
" plt.close()"
]
},
@@ -321,28 +307,39 @@
"outputs": [],
"source": [
"#divisor_name = \"powers_of_2_large\"\n",
- "divisor_name = \"feature\"\n",
- "plot_mults = list(filter(lambda mult: mult in distributions_mults, all_mults_with_ctr))\n",
+ "single_mult = random.choice(list(distributions_mults.keys())).with_error_model(None)\n",
+ "divisor_name = \"all_integers\"\n",
+ "plot_mults = list(filter(lambda mult: mult.with_error_model(None) == single_mult, distributions_mults))\n",
+ "plot_divisors = divisor_map[divisor_name]\n",
"#plot_divisors = (61, 65, 111, 165, 1536, 12288) \n",
- "plot_divisors = (55, 65, 165, 248, 3072)\n",
+ "#plot_divisors = (55, 65, 165, 248, 3072)\n",
"L = len(plot_divisors)\n",
"N = len(plot_mults)\n",
"x = list(range(L))\n",
"\n",
+ "colors = plt.get_cmap('tab20').colors +plt.get_cmap('tab20b').colors[:12]\n",
+ "\n",
"fig = plt.figure(figsize=(L/4+15, 24))\n",
"ax = plt.subplot(111)\n",
"\n",
"vals = np.zeros((N, L))\n",
"n_samples = 0\n",
+ "groups = {}\n",
"for i, mult in enumerate(plot_mults):\n",
+ "\n",
+ " clear_mult = mult.with_error_model(None)\n",
" probmap = distributions_mults[mult]\n",
" y_values = [probmap[l] for l in plot_divisors]\n",
+ " y_tup = tuple(y_values)\n",
+ " group = groups.setdefault(y_tup, set())\n",
+ " group.add(mult)\n",
" vals[i,] = y_values\n",
- " ax.plot(x, y_values,\n",
- " color=colors[mult],\n",
- " linestyle=styles[mult],\n",
- " marker=markers[mult],\n",
- " label=str(mult) if mult.countermeasure is None else \"_nolegend_\")\n",
+ " offset = (i - N/2) * 0.0001\n",
+ " ax.plot(x,[v + offset for v in y_values],\n",
+ " color=colors[i],\n",
+ " linestyle=styles[clear_mult],\n",
+ " marker=markers[clear_mult],\n",
+ " label=str(mult))\n",
" if showci:\n",
" cis = [conf_interval(p, probmap.samples) for p in y_values]\n",
" ci_low = [ci[0] for ci in cis]\n",
@@ -350,7 +347,7 @@
" ax.fill_between(x, ci_low, ci_high, color=\"black\", alpha=0.1)\n",
" n_samples += probmap.samples\n",
"\n",
- "ax.set_title(f\"{divisor_name} ({kind})\\nSamples(avg): \" + str(n_samples//N))\n",
+ "ax.set_title(f\"{divisor_name}\\nSamples(avg): \" + str(n_samples//N))\n",
"\n",
"#var = np.var(vals, axis=0)\n",
"#ax.plot(x, var / np.max(var), label=\"cross-mult variance (normalized)\", ls=\"--\", lw=2, color=\"black\")\n",
@@ -369,14 +366,52 @@
"ax.set_position([box.x0, box.y0, box.width * 0.7, box.height])\n",
"\n",
"# Put a legend to the right of the current axis\n",
- "ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n",
- "plt.show()"
+ "ax.legend(loc='center left', bbox_to_anchor=(1, 0.5));\n",
+ "for _, group in groups.items():\n",
+ " print(group)\n",
+ " print()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bf6a5acb-2836-445b-a877-c29ed0f03bf7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "divisor_name = \"all\"\n",
+ "plot_divisors = divisor_map[divisor_name]\n",
+ "ngroups = []\n",
+ "for mult in all_mults_with_ctr:\n",
+ " groups = {}\n",
+ " for error_model in all_error_models:\n",
+ " full = mult.with_error_model(error_model)\n",
+ " probmap = distributions_mults[full]\n",
+ " y_values = [probmap[l] for l in plot_divisors]\n",
+ " y_tup = tuple(y_values)\n",
+ " group = groups.setdefault(y_tup, set())\n",
+ " group.add(mult)\n",
+ " ngroups.append(len(groups))\n",
+ "print(np.min(ngroups))\n",
+ "print(np.mean(ngroups))\n",
+ "print(np.median(ngroups))\n",
+ "print(np.max(ngroups))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9c41722a-45b1-40f0-afc4-1aadc882fcc0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.close(\"all\")"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "d68f0bfc-cdf1-4891-b0e5-0b6d1b02ded7",
+ "id": "e7d7e614-467d-4e7d-874b-0495ef4dcf27",
"metadata": {},
"outputs": [],
"source": []
@@ -398,7 +433,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.3"
+ "version": "3.13.5"
}
},
"nbformat": 4,
diff --git a/requirements.txt b/requirements.txt
index e28315b..595b20a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,3 +8,4 @@ tabulate
tqdm
statsmodels
anytree
+scikit-learn