Rename training_predictions to target_predictions
fbickfordsmith authored and Dref360 committed May 24, 2024
1 parent 7ceba0a commit 31fde79
Showing 3 changed files with 59 additions and 59 deletions.
100 changes: 50 additions & 50 deletions baal/active/heuristics/heuristics.py
@@ -48,13 +48,13 @@ def singlepass(fn):
"""

@_wraps(fn)
def wrapper(self, probabilities, training_predictions=None):
def wrapper(self, probabilities, target_predictions=None):
if probabilities.ndim >= 3:
# Expected shape : [n_sample, n_classes, ..., n_iterations]
probabilities = probabilities.mean(-1)
if training_predictions is not None and training_predictions.ndim >= 3:
training_predictions = training_predictions.mean(-1)
return fn(self, probabilities, training_predictions)
if target_predictions is not None and target_predictions.ndim >= 3:
target_predictions = target_predictions.mean(-1)
return fn(self, probabilities, target_predictions)

return wrapper

@@ -71,13 +71,13 @@ def requireprobs(fn):
"""

@_wraps(fn)
def wrapper(self, probabilities, training_predictions=None):
def wrapper(self, probabilities, target_predictions=None):
# Expected shape : [n_sample, n_classes, ..., n_iterations]
probabilities = to_prob(probabilities)
training_predictions = (
to_prob(training_predictions) if training_predictions is not None else None
target_predictions = (
to_prob(target_predictions) if target_predictions is not None else None
)
return fn(self, probabilities, training_predictions=training_predictions)
return fn(self, probabilities, target_predictions=target_predictions)

return wrapper

@@ -96,7 +96,7 @@ def require_single_item(fn):
"""

@_wraps(fn)
def wrapper(self, probabilities, training_predictions=None):
def wrapper(self, probabilities, target_predictions=None):
# Expected single shape : [n_sample, n_classes, ..., n_iterations]
if isinstance(probabilities, (list, tuple)):
if len(probabilities) == 1:
@@ -109,7 +109,7 @@ def wrapper(self, probabilities, training_predictions=None):
" we suggest using baal.active.heuristics.CombineHeuristics"
)

return fn(self, probabilities, training_predictions)
return fn(self, probabilities, target_predictions)

return wrapper
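As the wrapper above suggests, a single-element list or tuple is unwrapped before scoring, while longer lists are rejected with a pointer to CombineHeuristics. A small sketch of that behaviour, with Entropy and the array shape chosen purely for illustration:

```python
import numpy as np
from baal.active.heuristics import Entropy

predictions = np.random.rand(16, 3, 10)     # [n_sample, n_classes, n_iterations]
heuristic = Entropy()

ranks_from_array = heuristic(predictions)   # plain ndarray input
ranks_from_list = heuristic([predictions])  # a one-element list is unwrapped to the same array
```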

@@ -163,26 +163,26 @@ def __init__(self, shuffle_prop=DEPRECATED, reverse=False, reduction="none"):
self._reduction_name = reduction
self.reduction = reduction if callable(reduction) else available_reductions[reduction]

def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
"""
Compute the score according to the heuristic.
Args:
predictions (ndarray): Array of predictions
training_predictions (Optional[ndarray]): Array of predictions on train set.
predictions (ndarray): Array of predictions.
target_predictions (Optional[ndarray]): Array of predictions on target inputs.
Returns:
Array of scores.
"""
raise NotImplementedError

def get_uncertainties_generator(self, predictions, training_predictions=None):
def get_uncertainties_generator(self, predictions, target_predictions=None):
"""
Compute the score according to the heuristic.
Args:
predictions (Iterable): Generator of predictions
training_predictions (Optional[ndarray]): Generator of training predictions
predictions (Iterable): Generator of predictions.
target_predictions (Optional[ndarray]): Generator of predictions on target inputs.
Raises:
ValueError if the generator is empty.
@@ -192,26 +192,26 @@ def get_uncertainties_generator(self, predictions, training_predictions=None):
"""
acc = []
for pred in predictions:
acc.append(self.get_uncertainties(pred, training_predictions=training_predictions))
acc.append(self.get_uncertainties(pred, target_predictions=target_predictions))
if len(acc) == 0:
raise ValueError("No prediction! Cannot order the values!")
return np.concatenate(acc)

def get_uncertainties(self, predictions, training_predictions=None):
def get_uncertainties(self, predictions, target_predictions=None):
"""
Get the uncertainties.
Args:
predictions (ndarray): Array of predictions
training_predictions (ndarray): Array of predictions on training set.
predictions (ndarray): Array of predictions.
target_predictions (ndarray): Array of predictions on target inputs.
Returns:
Array of uncertainties
"""
if isinstance(predictions, Tensor):
predictions = predictions.numpy()
scores = self.compute_score(predictions, training_predictions=training_predictions)
scores = self.compute_score(predictions, target_predictions=target_predictions)
scores = self.reduction(scores)
if not np.all(np.isfinite(scores)):
fixed = 0.0 if self.reversed else 10000
@@ -251,13 +251,13 @@ def reorder_indices(self, scores):
ranks = _shuffle_subset(ranks, self.shuffle_prop)
return ranks

def get_ranks(self, predictions, training_predictions=None):
def get_ranks(self, predictions, target_predictions=None):
"""
Rank the predictions according to their uncertainties.
Args:
predictions (ndarray): [batch_size, C, ..., Iterations]
training_predictions (Optional[ndarray]): [batch_size, C, ..., Iterations]
target_predictions (Optional[ndarray]): [batch_size, C, ..., Iterations]
Returns:
Ranked index according to the uncertainty (highest to lowest).
@@ -266,19 +266,19 @@ def get_ranks(self, predictions, training_predictions=None):
"""
if isinstance(predictions, types.GeneratorType):
scores = self.get_uncertainties_generator(
predictions, training_predictions=training_predictions
predictions, target_predictions=target_predictions
)
else:
scores = self.get_uncertainties(predictions, training_predictions=training_predictions)
scores = self.get_uncertainties(predictions, target_predictions=target_predictions)

return self.reorder_indices(scores), scores

def __call__(self, predictions, training_predictions=None):
def __call__(self, predictions, target_predictions=None):
"""Rank the predictions according to their uncertainties.
Only return the ranks, not the associated uncertainties.
"""
return self.get_ranks(predictions, training_predictions)[0]
return self.get_ranks(predictions, target_predictions)[0]


class BALD(AbstractHeuristic):
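A minimal usage sketch of the ranking API defined above (compute_score feeds get_uncertainties, which get_ranks and __call__ wrap); the choice of BALD and the MC-Dropout shape [batch_size, n_classes, n_iterations] are assumptions for illustration:

```python
import numpy as np
from baal.active.heuristics import BALD

pool_predictions = np.random.rand(100, 10, 20)  # [batch_size, n_classes, n_iterations]

heuristic = BALD()
ranks, scores = heuristic.get_ranks(pool_predictions)  # indices from most to least uncertain, plus raw scores
top_only = heuristic(pool_predictions)                  # __call__ returns the ranks alone
```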
@@ -300,13 +300,13 @@ def __init__(self, shuffle_prop=DEPRECATED, reduction="none"):

@require_single_item
@requireprobs
def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
"""
Compute the score according to the heuristic.
Args:
predictions (ndarray): Array of predictions
training_predictions (Optional[ndarray]): [batch_size, C, ..., Iterations]
target_predictions (Optional[ndarray]): [batch_size, C, ..., Iterations]
Returns:
Array of scores.
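For reference, the score BALD assigns is the mutual information between the predicted label and the model parameters, estimated from the MC iterations. A rough standalone NumPy sketch of that quantity (not the library's implementation; the epsilon is an assumption to avoid log(0)):

```python
import numpy as np

def bald_score(probs, eps=1e-12):
    # probs: [batch_size, n_classes, n_iterations], already normalised over the class axis.
    mean_probs = probs.mean(axis=-1)                                               # [N, C]
    entropy_of_mean = -np.sum(mean_probs * np.log(mean_probs + eps), axis=1)       # [N]
    expected_entropy = -np.sum(probs * np.log(probs + eps), axis=1).mean(axis=-1)  # [N]
    return entropy_of_mean - expected_entropy  # mutual information, higher = more informative
```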
@@ -445,13 +445,13 @@ def _joint_entropy(self, predictions, selected):

@require_single_item
@requireprobs
def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
"""
Compute the score according to the heuristic.
Args:
predictions (ndarray): Array of predictions [batch_size, C, Iterations]
training_predictions (Optional[ndarray]): [batch_size, C, ..., Iterations]
target_predictions (Optional[ndarray]): [batch_size, C, ..., Iterations]
Notes:
Only Classification is supported, not semantic segmentation or other.
@@ -495,13 +495,13 @@ def compute_score(self, predictions, training_predictions=None):

return uncertainties

def get_ranks(self, predictions, training_predictions=None):
def get_ranks(self, predictions, target_predictions=None):
"""
Rank the predictions according to their uncertainties.
Args:
predictions (ndarray): [batch_size, C, Iterations]
training_predictions (Optional[ndarray]): [batch_size, C, Iterations]
target_predictions (Optional[ndarray]): [batch_size, C, Iterations]
Returns:
Ranked index according to the uncertainty (highest to lowest).
@@ -540,7 +540,7 @@ def __init__(self, shuffle_prop=DEPRECATED, reduction="mean"):
super().__init__(shuffle_prop=shuffle_prop, reverse=True, reduction=reduction)

@require_single_item
def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
assert predictions.ndim >= 3
return np.var(predictions, -1)

@@ -561,7 +561,7 @@ def __init__(self, shuffle_prop=DEPRECATED, reduction="none"):
@require_single_item
@singlepass
@requireprobs
def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
return scipy.stats.entropy(np.swapaxes(predictions, 0, 1))


@@ -583,7 +583,7 @@ def __init__(self, shuffle_prop=DEPRECATED, reduction="none"):
@require_single_item
@singlepass
@requireprobs
def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
sort_arr = np.sort(predictions, axis=1)
return sort_arr[:, -1] - sort_arr[:, -2]

@@ -602,7 +602,7 @@ def __init__(self, shuffle_prop=DEPRECATED, reduction="none"):

@require_single_item
@singlepass
def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
return np.max(predictions, axis=1)
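A small worked example of the three single-pass scores above, on two probability vectors already averaged over MC iterations (values chosen for illustration): [0.5, 0.4, 0.1] has margin 0.5 - 0.4 = 0.1 and max-probability certainty 0.5, while [0.9, 0.05, 0.05] has margin 0.85 and certainty 0.9, so the first row is the more uncertain under either score.

```python
import numpy as np
import scipy.stats

probs = np.array([[0.5, 0.4, 0.1],
                  [0.9, 0.05, 0.05]])               # [n_sample, n_classes]

entropy = scipy.stats.entropy(probs, axis=1)        # higher = more uncertain
sorted_probs = np.sort(probs, axis=1)
margin = sorted_probs[:, -1] - sorted_probs[:, -2]  # lower = more uncertain
certainty = probs.max(axis=1)                       # lower = more uncertain
```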


@@ -617,7 +617,7 @@ class Precomputed(AbstractHeuristic):
def __init__(self, shuffle_prop=DEPRECATED, reverse=False):
super().__init__(shuffle_prop, reverse=reverse)

def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
return predictions


@@ -637,7 +637,7 @@ def __init__(self, shuffle_prop=DEPRECATED, reduction="none", seed=None):
else:
self.rng = np.random

def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
return self.rng.rand(predictions.shape[0])


@@ -682,7 +682,7 @@ def __init__(self, heuristics: List, weights: List, reduction="mean", shuffle_pr
else:
raise Exception("heuristics should have the same value for `reversed` parameter")

def get_uncertainties(self, predictions, training_predictions=None):
def get_uncertainties(self, predictions, target_predictions=None):
"""
Computes the score for each part of predictions according to the assigned heuristic.
@@ -691,28 +691,28 @@ def get_uncertainties(self, predictions, training_predictions=None):
[confidence_predictions: nd.array(), boundingbox_predictions: nd.array()]
Args:
predictions (list[ndarray]): list of predictions arrays
training_predictions (Optional[List[ndarray]): List of predictions on training dataset.
predictions (list[ndarray]): List of predictions arrays.
target_predictions (Optional[List[ndarray]]): List of predictions on target inputs.
Returns:
Array of uncertainties
"""

results = []
for ind, (prediction, train_pred) in enumerate(
zip_longest(predictions, training_predictions or [], fillvalue=None)
for ind, (prediction, target_pred) in enumerate(
zip_longest(predictions, target_predictions or [], fillvalue=None)
):
if isinstance(predictions[0], types.GeneratorType):
results.append(
self.composed_heuristic[ind].get_uncertainties_generator(
prediction, training_predictions=train_pred
prediction, target_predictions=target_pred
)
)
else:
results.append(
self.composed_heuristic[ind].get_uncertainties(
prediction, training_predictions=train_pred
prediction, target_predictions=target_pred
)
)
return results
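A hedged usage sketch of this combined path for a model with two outputs; the heads, shapes, and weights below are illustrative assumptions, not part of this change:

```python
import numpy as np
from baal.active.heuristics import BALD, CombineHeuristics, Variance

cls_predictions = np.random.rand(50, 10, 20)   # classification head: [batch_size, n_classes, n_iterations]
reg_predictions = np.random.randn(50, 1, 20)   # regression head:     [batch_size, 1, n_iterations]

heuristic = CombineHeuristics([BALD(), Variance()], weights=[0.5, 0.5], reduction="mean")
ranks = heuristic([cls_predictions, reg_predictions])
```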
@@ -788,7 +788,7 @@ def marginal_entropy_from_probs(self, probs):
return scores # [N,]

@requireprobs
def compute_score(self, predictions, training_predictions):
def compute_score(self, predictions, target_predictions):
"""
Compute the expected predictive information gain for each candidate input, x_i:
EPIG(x_i) = E_{p_*(x_*)}[I(y;y_*|x_i,x_*)]
@@ -798,16 +798,16 @@ def compute_score(self, predictions, training_predictions):
Args:
predictions (ndarray, [N_p, C, K]): p(y|x_i,θ_j) for i in [1, N_p] and j in [1, K].
training_predictions (ndarray, [N_t, C, K]): p(y|x_*^i,θ_j) for i in [1, N_t] and j in [1, K].
target_predictions (ndarray, [N_t, C, K]): p(y|x_*^i,θ_j) for i in [1, N_t] and j in [1, K].
Returns:
scores (ndarray, [N,]): EPIG(x_i) for i in [1, N_p].
"""
assert predictions.ndim == 3, "EPIG only supports classification for now."
assert training_predictions.ndim == 3, "EPIG only supports classification for now."
assert target_predictions.ndim == 3, "EPIG only supports classification for now."

probs_pool = torch.Tensor(predictions) # [N_p, C, K]
probs_targ = torch.Tensor(training_predictions) # [N_t, C, K]
probs_targ = torch.Tensor(target_predictions) # [N_t, C, K]

N_t, C, K = probs_targ.shape

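The renamed argument is the one EPIG actually consumes: predictions on inputs drawn from the target input distribution rather than on the training set. A minimal calling sketch, assuming the EPIG class exported by baal.active.heuristics and shapes matching the docstring above:

```python
import numpy as np
from baal.active.heuristics import EPIG

pool_predictions = np.random.rand(100, 10, 20)    # p(y | x_i, θ_j):     [N_p, C, K]
target_predictions = np.random.rand(25, 10, 20)   # p(y_* | x_*^i, θ_j): [N_t, C, K]

heuristic = EPIG()
ranks = heuristic(pool_predictions, target_predictions)  # highest expected information gain first
```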
16 changes: 8 additions & 8 deletions baal/active/heuristics/heuristics_gpu.py
@@ -44,12 +44,12 @@ def to_prob_torch(probabilities):
def requireprobs(fn):
"""Will convert logits to probs if needed"""

def wrapper(self, probabilities, training_predictions=None):
def wrapper(self, probabilities, target_predictions=None):
# Expected shape : [n_sample, n_classes, ..., n_iterations]
probabilities = to_prob_torch(probabilities)
if training_predictions is not None:
training_predictions = to_prob_torch(training_predictions)
return fn(self, probabilities, training_predictions)
if target_predictions is not None:
target_predictions = to_prob_torch(target_predictions)
return fn(self, probabilities, target_predictions)

return wrapper

@@ -82,12 +82,12 @@ def __init__(
reduction if callable(reduction) else available_reductions[reduction]
)

def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
"""
Compute the score according to the heuristic.
Args:
predictions (ndarray): Array of predictions
training_predictions (ndarray): Array of predictions from training set.
predictions (ndarray): Array of predictions.
target_predictions (ndarray): Array of predictions on target inputs.
Returns:
Array of scores.
@@ -158,7 +158,7 @@ def __init__(
)

@requireprobs
def compute_score(self, predictions, training_predictions=None):
def compute_score(self, predictions, target_predictions=None):
assert predictions.ndimension() >= 3
# [n_sample, n_class, ..., n_iterations]
expected_entropy = -torch.mean(
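For orientation, the expected-entropy term this GPU variant starts computing at the end of the hunk above can be sketched standalone in torch; the epsilon and shapes are illustrative, and the snippet is not the module's API:

```python
import torch

probs = torch.rand(8, 5, 10).softmax(dim=1)  # [n_sample, n_classes, n_iterations]
expected_entropy = -torch.mean(
    torch.sum(probs * torch.log(probs + 1e-5), dim=1),  # negative entropy per MC iteration: [n_sample, n_iterations]
    dim=-1,
)  # [n_sample]
```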
2 changes: 1 addition & 1 deletion tests/active/heuristic_test.py
@@ -224,7 +224,7 @@ def test_that_logits_get_converted_to_probabilities(logits):

# define a random func:
@requireprobs
def wrapped(_, logits, training_predictions=None):
def wrapped(_, logits, target_predictions=None):
return logits

probability_distribution = wrapped(None, logits)
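A hedged, standalone sketch of what this test exercises; identity_heuristic is a made-up helper, and requireprobs is assumed importable as in the test module:

```python
import numpy as np
from baal.active.heuristics import requireprobs

@requireprobs
def identity_heuristic(_, probabilities, target_predictions=None):
    return probabilities

logits = np.random.randn(10, 3, 5)          # unnormalised logits: [n_sample, n_classes, n_iterations]
probs = identity_heuristic(None, logits)
assert np.allclose(probs.sum(axis=1), 1.0)  # converted to probabilities along the class axis
```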
