Commit 2168986

Merge branch 'main' into gradinversion

viktorvaladi committed Jan 8, 2025
2 parents 8a55f3f + e5c363d

Showing 31 changed files with 76,987 additions and 67,763 deletions.
56 changes: 20 additions & 36 deletions .github/workflows/run_tests.yml
@@ -1,5 +1,3 @@
-# This workflow will install Python dependencies, run tests and lint with a single version of Python
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
 name: Running tests
 
 on:
@@ -10,22 +8,8 @@ on:
   workflow_dispatch:
 
 jobs:
-  code-checks:
+  code-checks-and-tests:
     runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      - name: Ruff Linting
-        uses: chartboost/ruff-action@v1
-        with:
-          args: check leakpro --exclude examples,leakpro/tests
-
-  build:
-    runs-on: ubuntu-latest
-    permissions:
-      pull-requests: write
-      contents: write # Ensure write permission for contents
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
@@ -34,33 +18,33 @@ jobs:
         uses: actions/setup-python@v4
         with:
           python-version: 3.9
 
-      - name: Install micromamba
-        run: |
-          curl -L https://micromamba.snakepit.net/api/micromamba/linux-64/latest | tar -xvj -C /usr/local/bin/ --strip-components=1 bin/micromamba
-
-      - name: Install dependencies with micromamba
+      - name: Install dependencies
         run: |
-          micromamba create --file environment.yml --name leakpro --root-prefix /home/runner/micromamba-root
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+      - name: Set PYTHONPATH
+        run: echo "PYTHONPATH=$(pwd)" >> $GITHUB_ENV
+      - name: Ruff Linting
+        run: |
+          pip install ruff
+          ruff check leakpro --exclude examples,leakpro/tests
       - name: Install pytest and pytest-cov
-        shell: bash -l {0}
         run: |
-          micromamba activate leakpro
-          micromamba install pytest pytest-cov
+          pip install pytest pytest-cov pytest-mock coverage-badge
       - name: Run tests with pytest
-        shell: bash -l {0}
         run: |
-          micromamba activate leakpro
-          pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=leakpro leakpro/tests/ | tee pytest-coverage.txt
-          cat ./pytest-coverage.txt
+          pytest --cov=leakpro --cov-report=term-missing:skip-covered --cov-report=xml --cov-report=html leakpro/tests/
+          cat ./coverage.xml
+      - name: Create Coverage Badge
+        run: |
+          mkdir -p badges
+          coverage-badge -o badges/coverage.svg -f
-      - name: Upload test coverage report
-        uses: actions/upload-artifact@v4
+      - name: Deploy Coverage Badge to GitHub Pages
+        uses: JamesIves/github-pages-deploy-action@v4
         with:
-          name: pytest-coverage.txt
-          path: pytest-coverage.txt
+          branch: gh-pages
+          folder: badges
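The reworked job drops micromamba in favor of plain pip and publishes a coverage badge to the gh-pages branch. A minimal local sketch of what the test-and-badge steps now do, assuming pytest, pytest-cov, and coverage-badge are installed (the commands mirror the workflow; the script itself is illustrative):

# Local sketch of the updated CI test-and-badge steps (illustrative only).
import pathlib
import subprocess

# Run the suite with coverage, mirroring the workflow's pytest invocation.
subprocess.run(
    ["pytest", "--cov=leakpro", "--cov-report=term-missing:skip-covered",
     "--cov-report=xml", "--cov-report=html", "leakpro/tests/"],
    check=True,
)

# Generate the SVG badge that the workflow deploys to gh-pages.
pathlib.Path("badges").mkdir(exist_ok=True)
subprocess.run(["coverage-badge", "-o", "badges/coverage.svg", "-f"], check=True)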
1 change: 1 addition & 0 deletions README.md
@@ -6,6 +6,7 @@
 ![Open Issues](https://img.shields.io/github/issues/aidotse/LeakPro)
 ![Open PRs](https://img.shields.io/github/issues-pr/aidotse/LeakPro)
 ![Downloads](https://img.shields.io/github/downloads/aidotse/LeakPro/total)
+![Coverage](https://github.com/aidotse/LeakPro/blob/gh-pages/coverage.svg)
 
 ## To install
 0. **Clone repository**
2 changes: 2 additions & 0 deletions environment.yml
@@ -19,6 +19,7 @@ dependencies:
   # Deep Learning
   - pytorch
   - torchvision
+  - torchmetrics
 
   # Utilities
   - dotmap
@@ -32,5 +33,6 @@ dependencies:
 
   # Development and Testing
   - pytest
+  - pytest-mock
397 changes: 38 additions & 359 deletions examples/report_handler/report_handler.ipynb

Large diffs are not rendered by default.

14 changes: 7 additions & 7 deletions examples/synthetic_data/anomalies_example.ipynb

Large diffs are not rendered by default.

18 changes: 9 additions & 9 deletions examples/synthetic_data/inference_example.ipynb

Large diffs are not rendered by default.

65 changes: 33 additions & 32 deletions examples/synthetic_data/linkability_example.ipynb

Large diffs are not rendered by default.

53 changes: 29 additions & 24 deletions examples/synthetic_data/singling_out_example.ipynb

Large diffs are not rendered by default.

29 changes: 16 additions & 13 deletions leakpro/attacks/mia_attacks/lira.py
@@ -134,21 +134,21 @@ def prepare_attack(self:Self)->None:
             mask = (num_shadow_models_seen_points > 0) & (num_shadow_models_seen_points < self.num_shadow_models)
 
             # Filter the audit data
-            self.audit_dataset["data"] = self.audit_dataset["data"][mask]
+            self.audit_data_indices = self.audit_dataset["data"][mask]
             self.in_indices_masks = self.in_indices_masks[mask, :]
 
             # Filter IN and OUT members
             self.in_members = np.arange(np.sum(mask[self.audit_dataset["in_members"]]))
             num_out_members = np.sum(mask[self.audit_dataset["out_members"]])
             self.out_members = np.arange(len(self.in_members), len(self.in_members) + num_out_members)
 
-            assert len(self.audit_dataset["data"]) == len(self.in_members) + len(self.out_members)
+            assert len(self.audit_data_indices) == len(self.in_members) + len(self.out_members)
 
-            if len(self.audit_dataset["data"]) == 0:
+            if len(self.audit_data_indices) == 0:
                 raise ValueError("No points in the audit dataset are used for the shadow models")
 
         else:
-            self.audit_dataset["data"] = self.audit_dataset["data"]
+            self.audit_data_indices = self.audit_dataset["data"]
             self.in_members = self.audit_dataset["in_members"]
             self.out_members = self.audit_dataset["out_members"]
 
@@ -160,21 +160,24 @@ def prepare_attack(self:Self)->None:
             logger.info("This is not an offline attack!")
 
         logger.info(f"Calculating the logits for all {self.num_shadow_models} shadow models")
-        self.shadow_models_logits = np.swapaxes(self.signal(self.shadow_models, self.handler, self.audit_dataset["data"],
-                                                            self.eval_batch_size), 0, 1)
+        self.shadow_models_logits = np.swapaxes(self.signal(self.shadow_models,
+                                                            self.handler,
+                                                            self.audit_data_indices,
+                                                            self.eval_batch_size), 0, 1)
 
         # Calculate logits for the target model
         logger.info("Calculating the logits for the target model")
-        self.target_logits = np.swapaxes(self.signal([self.target_model], self.handler, self.audit_dataset["data"],
-                                                     self.eval_batch_size), 0, 1).squeeze()
+        self.target_logits = np.swapaxes(self.signal([self.target_model],
+                                                     self.handler,
+                                                     self.audit_data_indices,
+                                                     self.eval_batch_size), 0, 1).squeeze()
 
         # Using Memorizationg boosting
         if self.memorization:
 
             # Prepare for memorization
-            org_audit_data_length = self.audit_dataset["data"].size
-            self.audit_dataset["data"] = self.audit_dataset["data"][mask] if self.online else self.audit_dataset["data"]
-            audit_data_labels = self.handler.get_labels(self.audit_dataset["data"])
+            org_audit_data_length = self.audit_data_indices.size
+            audit_data_labels = self.handler.get_labels(self.audit_data_indices)
 
             logger.info("Running memorization")
             memorization = Memorization(
@@ -185,7 +188,7 @@ def prepare_attack(self:Self)->None:
                 self.in_indices_masks,
                 self.shadow_models,
                 self.target_model,
-                self.audit_dataset["data"],
+                self.audit_data_indices,
                 audit_data_labels,
                 org_audit_data_length,
                 self.handler,
@@ -318,5 +321,5 @@ def run_attack(self:Self) -> MIAResult:
             true_labels=true_labels,
             predictions_proba=None,
             signal_values=signal_values,
-            audit_indices=self.audit_dataset["data"],
+            audit_indices=self.audit_data_indices,
         )
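The recurring change in this file replaces in-place mutation of self.audit_dataset["data"] with a separate self.audit_data_indices attribute, so the original audit dataset is never overwritten. A minimal sketch of the masking pattern (toy values; the seen-counts and shadow-model count are invented for illustration):

# Sketch of the IN/OUT filtering pattern used in prepare_attack (toy data).
import numpy as np

num_shadow_models = 4
# How many shadow models saw each audit point (illustrative values).
num_shadow_models_seen_points = np.array([0, 1, 3, 4, 2])
audit_data = np.arange(100, 105)  # stand-in for audit_dataset["data"]

# Keep points seen by at least one, but not all, shadow models.
mask = (num_shadow_models_seen_points > 0) & (num_shadow_models_seen_points < num_shadow_models)
audit_data_indices = audit_data[mask]  # stored separately; the dataset dict stays intact
print(audit_data_indices)  # [101 102 104]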
23 changes: 9 additions & 14 deletions leakpro/metrics/attack_result.py
@@ -286,7 +286,7 @@ def save(self:Self, path: str, name: str, config:dict = None, show_plot:bool = F
             "tpr": self.tpr.tolist(),
             "fpr": self.fpr.tolist(),
             "roc_auc": self.roc_auc,
-            "config": config,
+            "config": result_config,
             "fixed_fpr": fixed_fpr_table,
             "audit_indices": self.audit_indices.tolist() if self.audit_indices is not None else None,
             "signal_values": self.signal_values.tolist() if self.signal_values is not None else None,
@@ -310,7 +310,7 @@ def save(self:Self, path: str, name: str, config:dict = None, show_plot:bool = F
         temp_res.id = self.id
         self.create_plot(results = [temp_res],
                          save_dir = save_path,
-                         save_name = "ROC",
+                         save_name = name,
                          show_plot = show_plot
                          )
 
@@ -415,7 +415,7 @@ def create_plot(
 
         plt.xlabel("False positive rate (FPR)")
         plt.ylabel("True positive rate (TPR)")
-        plt.title(save_name+"ROC Curve")
+        plt.title(save_name+" ROC Curve")
         plt.savefig(fname=f"{filename}.png", dpi=1000, bbox_inches="tight")
 
         if show_plot:
@@ -474,28 +474,24 @@ def create_results(
     @staticmethod
     def _latex(
             results: list,
-            save_dir: str,
+            save_dir: str, # noqa: ARG004
             save_name: str
         ) -> str:
         """Latex method for MIAResult."""
 
-        filename = f"{save_dir}/{save_name}"
-
         # Input mia results image
         latex_content = f"""
         \\subsection{{{" ".join(save_name.split("_"))}}}
         \\begin{{figure}}[ht]
-        \\includegraphics[width=0.8\\textwidth]{{{filename}.png}}
+        \\includegraphics[width=0.8\\textwidth]{{{save_name}.png}}
         \\end{{figure}}
         """
 
         # Initialize latex table
         latex_content += """
         \\resizebox{\\linewidth}{!}{%
         \\begin{tabularx}{\\textwidth}{l c l l l l}
-            Attack name & attack config & TPR: 1.0\\%FPR & 0.1\\%FPR & 0.01\\%FPR & 0.0\\%FPR \\\\
-            \\hline
-        """
+            Attack name & attack config & TPR: 1.0\\%FPR & 0.1\\%FPR & 0.01\\%FPR & 0.0\\%FPR \\\\ \\hline """ # noqa: W291
 
         # Convert config to latex table input
         def config_latex_style(config: str) -> str:
@@ -505,10 +501,9 @@ def config_latex_style(config: str) -> str:
 
         # Append all mia results to table
         for res in results:
-            config = config_latex_style(res.id)
-            latex_content += f"""{"-".join(res.resultname.split("_"))} & {config} & {res.fixed_fpr_table["TPR@1.0%FPR"]} &
-            {res.fixed_fpr_table["TPR@0.1%FPR"]} & {res.fixed_fpr_table["TPR@0.01%FPR"]} &
-            {res.fixed_fpr_table["TPR@0.0%FPR"]} \\\\ \\hline"""
+            config = config_latex_style(get_config_name(res.config))
+            latex_content += f"""
+            {"-".join(res.resultname.split("_"))} & {config} & {res.fixed_fpr_table["TPR@1.0%FPR"]} & {res.fixed_fpr_table["TPR@0.1%FPR"]} & {res.fixed_fpr_table["TPR@0.01%FPR"]} & {res.fixed_fpr_table["TPR@0.0%FPR"]} \\\\ \\hline """ # noqa: E501
         latex_content += """
         \\end{tabularx}
         }
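The _latex changes emit each table row as a single source line (so LaTeX cells no longer contain stray newlines) and derive the config column from res.config via get_config_name instead of res.id. A standalone sketch of the row construction (the result object is a stand-in and all values are invented):

# Sketch of the single-line LaTeX row built in _latex (stand-in data).
from types import SimpleNamespace

res = SimpleNamespace(
    resultname="lira_online",
    config_name="num-shadow-models-8",  # stand-in for config_latex_style(get_config_name(res.config))
    fixed_fpr_table={"TPR@1.0%FPR": 0.42, "TPR@0.1%FPR": 0.13,
                     "TPR@0.01%FPR": 0.05, "TPR@0.0%FPR": 0.01},
)

row = (
    f'{"-".join(res.resultname.split("_"))} & {res.config_name} & '
    f'{res.fixed_fpr_table["TPR@1.0%FPR"]} & {res.fixed_fpr_table["TPR@0.1%FPR"]} & '
    f'{res.fixed_fpr_table["TPR@0.01%FPR"]} & {res.fixed_fpr_table["TPR@0.0%FPR"]} '
    "\\\\ \\hline"
)
print(row)  # lira-online & num-shadow-models-8 & 0.42 & 0.13 & 0.05 & 0.01 \\ \hline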
14 changes: 14 additions & 0 deletions leakpro/reporting/report_handler.py
@@ -156,6 +156,12 @@ def create_results_syn(
     def create_report(self:Self) -> None:
         """Method to create PDF report."""
 
+        # Make sure results have been read and created
+        if not hasattr(self, "results"):
+            self.load_results()
+        if self.pdf_results and all(not value for value in self.pdf_results.values()):
+            self.create_results_all()
+
         # Create initial part of the document.
         self._init_pdf()
 
@@ -169,6 +175,9 @@ def create_report(self:Self) -> None:
         # Compile the PDF
         self._compile_pdf()
 
+        # Empty result variables
+        self._reset_result()
+
     def _init_pdf(self:Self) -> None:
         self.latex_content = """
         \\documentclass{article}
@@ -202,3 +211,8 @@ def _compile_pdf(self:Self) -> None:
         except Exception as e:
             self.logger.info(f"Could not compile PDF: {e}")
             self.logger.info("Make sure to install pdflatex with apt install texlive-latex-base")
+
+    def _reset_result(self:Self) -> None:
+        self.results = []
+        for key in self.pdf_results:
+            self.pdf_results[key] = []
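With the new guard, create_report() can be called on its own: it lazily loads saved results and builds the per-attack sections if the caller skipped those steps, then clears its state via _reset_result(). A hypothetical usage sketch (the constructor argument is an assumption, not the documented API):

# Hypothetical usage of the guarded create_report (argument name assumed).
from leakpro.reporting.report_handler import ReportHandler

handler = ReportHandler(report_dir="./leakpro_output/results")
# load_results() and create_results_all() are now optional; the guard in
# create_report() runs them automatically when they were skipped.
handler.create_report()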
@@ -212,9 +212,10 @@ def evaluate(self: Self) -> EvaluationResults:
         self.naive_guesses = self.inference_attack(naive=True)
         # Set results
         self.results = EvaluationResults(
-            n_total = self.n_attacks,
-            n_main = self.main_guesses.count,
-            n_naive = self.naive_guesses.count,
+            n_main_total = self.n_attacks,
+            n_main_success = self.main_guesses.count,
+            n_naive_total = self.n_attacks,
+            n_naive_success = self.naive_guesses.count,
             confidence_level = self.confidence_level
         )
         return self.results
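Across the synthetic-data evaluators, the EvaluationResults fields are renamed so the main and naive attacks carry separate totals and success counts rather than one shared n_total. A self-contained sketch of the new shape (dataclass stand-in; numbers invented):

# Stand-in mirroring the renamed EvaluationResults fields (illustrative).
from dataclasses import dataclass

@dataclass
class EvaluationResults:
    n_main_total: int
    n_main_success: int
    n_naive_total: int
    n_naive_success: int
    confidence_level: float

results = EvaluationResults(n_main_total=500, n_main_success=220,
                            n_naive_total=500, n_naive_success=60,
                            confidence_level=0.95)
print(results.n_main_success / results.n_main_total)  # 0.44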
@@ -200,24 +200,25 @@ def evaluate(self: Self) -> EvaluationResults:
         """Run the linkability attacks (main and naive) and set and return results."""
         # Main linkability attack
         self.main_links = main_linkability_attack(
-            ori=self.ori,
-            syn=self.syn,
-            n_attacks=self.n_attacks,
-            aux_cols=self.aux_cols,
-            n_neighbors=self.n_neighbors,
-            n_jobs=self.n_jobs,
+            ori = self.ori,
+            syn = self.syn,
+            n_attacks = self.n_attacks,
+            aux_cols = self.aux_cols,
+            n_neighbors = self.n_neighbors,
+            n_jobs = self.n_jobs
         )
         # Naive linkability attack
         self.naive_links = naive_linkability_attack(
-            n_synthetic=self.syn.shape[0],
-            n_attacks=self.n_attacks,
-            n_neighbors=self.n_neighbors
+            n_synthetic = self.syn.shape[0],
+            n_attacks = self.n_attacks,
+            n_neighbors = self.n_neighbors
         )
         # Set results
         self.results = EvaluationResults(
-            n_total = self.n_attacks,
-            n_main = self.main_links.count,
-            n_naive = self.naive_links.count,
+            n_main_total = self.n_attacks,
+            n_main_success = self.main_links.count,
+            n_naive_total = self.n_attacks,
+            n_naive_success = self.naive_links.count,
             confidence_level = self.confidence_level
         )
         return self.results
@@ -102,6 +102,7 @@ class UniqueSinglingOutQueries(BaseModel):
     queries: List[str] = []
     idxs: List[int] = []
     count: int = 0
+    len_passed_queries: int = 0
 
     def evaluate_queries(self: Self, *, queries: List[str]) -> Self:
         """Evaluate queries on self.df.
@@ -113,6 +114,7 @@ def evaluate_queries(self: Self, *, queries: List[str]) -> Self:
         self.queries = []
         self.idxs = []
         self.count = 0
+        self.len_passed_queries = len(queries)
         #Iterate through queries
         for query in queries:
             self.check_and_append(query=query)
@@ -406,9 +408,10 @@ def evaluate(self: Self) -> EvaluationResults:
         )
         # Set results
         self.results = EvaluationResults(
-            n_total = self.n_attacks,
-            n_main = self.main_queries.count,
-            n_naive = self.naive_queries.count,
+            n_main_total = self.main_queries.len_passed_queries,
+            n_main_success = self.main_queries.count,
+            n_naive_total = self.naive_queries.len_passed_queries,
+            n_naive_success = self.naive_queries.count,
             confidence_level = self.confidence_level
         )
         return self.results
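Here the totals come from len_passed_queries rather than n_attacks: the singling-out evaluator may end up with fewer valid queries than attacks requested, so success rates are now computed against the number of queries actually evaluated. A toy sketch of the bookkeeping (the predicate is a placeholder, not LeakPro's real singling-out check):

# Toy sketch of the len_passed_queries bookkeeping (placeholder predicate).
from typing import List

class QueriesSketch:
    def __init__(self) -> None:
        self.count = 0
        self.len_passed_queries = 0

    def evaluate_queries(self, queries: List[str]) -> "QueriesSketch":
        self.len_passed_queries = len(queries)  # denominator for the success rate
        for query in queries:
            if "==" in query:  # placeholder for the real singling-out predicate
                self.count += 1
        return self

q = QueriesSketch().evaluate_queries(["age == 103", "city != 'X'"])
print(q.count, "/", q.len_passed_queries)  # 1 / 2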