
Commit

Handle multi-dimension inputs
AnasNeumann committed Nov 18, 2023
1 parent 7059db9 commit 33a17e3
Showing 3 changed files with 38 additions and 22 deletions.
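The change threads a new `inputs_dimension` argument through `MAML`, `_MAML_compute`, and `_build_task`, so that a task's inputs can be replicated into a list of identical arrays for models with several input branches. Below is a minimal, hypothetical usage sketch; the two-input model, the toy task dicts, and the `from simplemaml import MAML` import path are assumptions for illustration, not part of this commit.

```python
import numpy as np
from tensorflow import keras
from simplemaml import MAML  # import path assumed from setup.py / simplemaml.py

# Hypothetical two-input regression model: with inputs_dimension=2, each task's
# inputs are replicated so that both branches receive the same array.
inp_a = keras.Input(shape=(4,))
inp_b = keras.Input(shape=(4,))
merged = keras.layers.Concatenate()([inp_a, inp_b])
out = keras.layers.Dense(1)(merged)
model = keras.Model(inputs=[inp_a, inp_b], outputs=out)

# Hypothetical toy tasks, in the dict format _build_task expects when no
# explicit train/test split is provided.
tasks = [
    {"inputs": np.random.rand(50, 4), "target": np.random.rand(50, 1)}
    for _ in range(5)
]

MAML(model, meta_epochs=10, inputs_dimension=2, tasks=tasks)
```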
30 changes: 19 additions & 11 deletions README.md
@@ -38,7 +38,7 @@ Neumann, Anas. (2023). Simple Python and TensorFlow implementation of the optimi

## Complete code
```python
def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=keras.losses.mse, f_loss=keras.losses.MeanSquaredError(), meta_epochs=100, meta_tasks_per_epoch=[10, 30], validation_split=0.2, k_folds=0, tasks=[], cumul=False):
def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=keras.losses.mse, f_loss=keras.losses.MeanSquaredError(), meta_epochs=100, meta_tasks_per_epoch=[10, 30], inputs_dimension=1, validation_split=0.2, k_folds=0, tasks=[], cumul=False):
"""
Simple MAML algorithm implementation for supervised regression.
:param model: A Keras model to be trained using MAML.
@@ -48,6 +48,7 @@ def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=
:param c_loss: Loss function for calculating training loss.
:param meta_epochs: Number of meta-training epochs.
:param meta_tasks_per_epoch: Range of tasks to sample per epoch.
:param inputs_dimension: Number of model inputs; when greater than 1, each task's inputs are replicated into a list of identical arrays (e.g., for multi-input or sequence-to-sequence models).
:param validation_split: Ratio of data to use for validation in each task (either a fixed value or a [min, max] range sampled uniformly at random).
:param k_folds: Number of folds for cross-validation each time a task is sampled for meta-learning (0 disables it).
:param tasks: List of tasks for meta-training.
@@ -56,37 +57,44 @@ def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=
"""
if tf.config.list_physical_devices('GPU'):
with tf.device('/GPU:0'):
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, validation_split, k_folds, tasks, cumul)
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, inputs_dimension, validation_split, k_folds, tasks, cumul)
else:
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, validation_split, k_folds, tasks, cumul)
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, inputs_dimension, validation_split, k_folds, tasks, cumul)

def _build_task(t, validation_split, k_folds):
def _build_task(t, inputs_dimension, validation_split, k_folds):
"""
Build task t by splitting it into train_input, test_input, train_target, and test_target if that is not already done.
This function is flexible and handles both random validation_splits and k_folds.
:param t: a task to learn during the meta-pre-training stage
:param inputs_dimension: Number of model inputs; when greater than 1, the task's inputs are replicated into a list of identical arrays (e.g., for multi-input or sequence-to-sequence models).
:param validation_split: Optional ratio of data to use for training in each task (either a fixed value or a [min, max] range sampled uniformly at random).
:param k_folds: Optional number of folds for cross-validation each time a task is sampled for meta-learning (0 disables it).
:return: train_input, test_input, train_target, test_target
"""
if "train" in t and "test" in t:
return t["train"]["inputs"], t["test"]["inputs"], t["train"]["target"], t["test"]["target"]
train_input = t["train"]["inputs"] if inputs_dimension<=1 else [t["train"]["inputs"] for d in range(inputs_dimension)]
test_input = t["test"]["inputs"] if inputs_dimension<=1 else [t["test"]["inputs"] for d in range(inputs_dimension)]
return train_input, test_input, t["train"]["target"], t["test"]["target"]
elif k_folds>0:
fold = random.randint(0, k_folds-1)
fold_size = (len(t["inputs"]) // k_folds)
v_start = fold * fold_size
v_end = (fold + 1) * fold_size if fold < k_folds - 1 else len(t["inputs"])
train_input, train_target = np.concatenate((t["inputs"][:v_start], t["inputs"][v_end:]), axis=0), np.concatenate((t["target"][:v_start], t["target"][v_end:]), axis=0)
train_target, test_target = t["inputs"][v_start:v_end], t["target"][v_start:v_end]
t_i = np.concatenate((t["inputs"][:v_start], t["inputs"][v_end:]), axis=0)
train_input = t_i if inputs_dimension<=1 else [t_i for d in range(inputs_dimension)]
test_input = t["inputs"][v_start:v_end] if inputs_dimension<=1 else [t["inputs"][v_start:v_end] for d in range(inputs_dimension)]
train_target = np.concatenate((t["target"][:v_start], t["target"][v_end:]), axis=0)
test_target = t["target"][v_start:v_end]
return train_input, test_input, train_target, test_target
else:
v = random.uniform(validation_split[0], validation_split[1]) if isinstance(validation_split,list) else validation_split
split_idx = int(len(t["inputs"]) * v)
train_input, test_input = t["inputs"][:split_idx], t["inputs"][split_idx:]
split_idx = int(len(t["inputs"]) * v)
train_input = t["inputs"][:split_idx] if inputs_dimension<=1 else [t["inputs"][:split_idx] for d in range(inputs_dimension)]
test_input = t["inputs"][split_idx:] if inputs_dimension<=1 else [t["inputs"][split_idx:] for d in range(inputs_dimension)]
train_target, test_target = t["target"][:split_idx], t["target"][split_idx:]
return train_input, test_input, train_target, test_target

def _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, validation_split, k_folds, tasks, cumul):
def _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, inputs_dimension, validation_split, k_folds, tasks, cumul):
log_step = meta_epochs // 10 if meta_epochs > 10 else 1
optim_test=optimizer(learning_rate=alpha)
optim_test.build(model.trainable_variables)
@@ -104,7 +112,7 @@ def _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, me
model_copy.compile(loss=f_loss, optimizer=optim_train)
for _ in range(num_tasks_sampled):
t = random.choice(tasks)
train_input, test_input, train_target, test_target = _build_task(t, validation_split, k_folds)
train_input, test_input, train_target, test_target = _build_task(t, inputs_dimension, validation_split, k_folds)

# 1. Inner loop: Update the model copy on the current task
with tf.GradientTape(watch_accessed_variables=False) as train_tape:
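For illustration only, here is a small sketch of what the replication added to `_build_task` amounts to when `inputs_dimension` is greater than 1; the array shapes and split index below are made up, but the conditional mirrors the else-branch above.

```python
import numpy as np

inputs_dimension = 3
split_idx = 20
inputs = np.random.rand(100, 8)  # hypothetical task inputs

# Mirrors the else-branch of _build_task: one identical copy per model input.
train_input = (inputs[:split_idx] if inputs_dimension <= 1
               else [inputs[:split_idx] for d in range(inputs_dimension)])
test_input = (inputs[split_idx:] if inputs_dimension <= 1
              else [inputs[split_idx:] for d in range(inputs_dimension)])

print(len(train_input), train_input[0].shape)  # 3 (20, 8)
print(len(test_input), test_input[0].shape)    # 3 (80, 8)
```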
2 changes: 1 addition & 1 deletion setup.py
@@ -5,7 +5,7 @@

setup(
name='simplemaml',
version='1.2.6',
version='1.2.7',
description='A generic Python and TensorFlow function that implements a simple version of the "Model-Agnostic Meta-Learning (MAML) Algorithm for Fast Adaptation of Deep Networks" as designed by Chelsea Finn et al. 2017',
long_description=long_description,
long_description_content_type='text/markdown',
28 changes: 18 additions & 10 deletions simplemaml.py
@@ -3,7 +3,7 @@
import numpy as np
import random

def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=keras.losses.mse, f_loss=keras.losses.MeanSquaredError(), meta_epochs=100, meta_tasks_per_epoch=[10, 30], validation_split=0.2, k_folds=0, tasks=[], cumul=False):
def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=keras.losses.mse, f_loss=keras.losses.MeanSquaredError(), meta_epochs=100, meta_tasks_per_epoch=[10, 30], inputs_dimension=1, validation_split=0.2, k_folds=0, tasks=[], cumul=False):
"""
Simple MAML algorithm implementation for supervised regression.
:param model: A Keras model to be trained using MAML.
@@ -13,6 +13,7 @@ def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=
:param c_loss: Loss function for calculating training loss.
:param meta_epochs: Number of meta-training epochs.
:param meta_tasks_per_epoch: Range of tasks to sample per epoch.
:param inputs_dimension: Number of model inputs; when greater than 1, each task's inputs are replicated into a list of identical arrays (e.g., for multi-input or sequence-to-sequence models).
:param validation_split: Ratio of data to use for validation in each task (either a fixed value or a [min, max] range sampled uniformly at random).
:param k_folds: Number of folds for cross-validation each time a task is sampled for meta-learning (0 disables it).
:param tasks: List of tasks for meta-training.
@@ -21,37 +22,44 @@ def MAML(model, alpha=0.005, beta=0.005, optimizer=keras.optimizers.SGD, c_loss=
"""
if tf.config.list_physical_devices('GPU'):
with tf.device('/GPU:0'):
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, validation_split, k_folds, tasks, cumul)
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, inputs_dimension, validation_split, k_folds, tasks, cumul)
else:
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, validation_split, k_folds, tasks, cumul)
return _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, inputs_dimension, validation_split, k_folds, tasks, cumul)

def _build_task(t, validation_split, k_folds):
def _build_task(t, inputs_dimension, validation_split, k_folds):
"""
Build task t by splitting it into train_input, test_input, train_target, and test_target if that is not already done.
This function is flexible and handles both random validation_splits and k_folds.
:param t: a task to learn during the meta-pre-training stage
:param inputs_dimension: Number of model inputs; when greater than 1, the task's inputs are replicated into a list of identical arrays (e.g., for multi-input or sequence-to-sequence models).
:param validation_split: Optional ratio of data to use for training in each task (either a fixed value or a [min, max] range sampled uniformly at random).
:param k_folds: Optional number of folds for cross-validation each time a task is sampled for meta-learning (0 disables it).
:return: train_input, test_input, train_target, test_target
"""
if "train" in t and "test" in t:
train_input = t["train"]["inputs"] if inputs_dimension<=1 else [t["train"]["inputs"] for d in range(inputs_dimension)]
test_input = t["test"]["inputs"] if inputs_dimension<=1 else [t["test"]["inputs"] for d in range(inputs_dimension)]
return train_input, test_input, t["train"]["target"], t["test"]["target"]
elif k_folds>0:
fold = random.randint(0, k_folds-1)
fold_size = (len(t["inputs"]) // k_folds)
v_start = fold * fold_size
v_end = (fold + 1) * fold_size if fold < k_folds - 1 else len(t["inputs"])
train_input, train_target = np.concatenate((t["inputs"][:v_start], t["inputs"][v_end:]), axis=0), np.concatenate((t["target"][:v_start], t["target"][v_end:]), axis=0)
train_target, test_target = t["inputs"][v_start:v_end], t["target"][v_start:v_end]
t_i = np.concatenate((t["inputs"][:v_start], t["inputs"][v_end:]), axis=0)
train_input = t_i if inputs_dimension<=1 else [t_i for d in range(inputs_dimension)]
test_input = t["inputs"][v_start:v_end] if inputs_dimension<=1 else [t["inputs"][v_start:v_end] for d in range(inputs_dimension)]
train_target = np.concatenate((t["target"][:v_start], t["target"][v_end:]), axis=0)
test_target = t["target"][v_start:v_end]
return train_input, test_input, train_target, test_target
else:
v = random.uniform(validation_split[0], validation_split[1]) if isinstance(validation_split,list) else validation_split
split_idx = int(len(t["inputs"]) * v)
train_input, test_input = t["inputs"][:split_idx], t["inputs"][split_idx:]
split_idx = int(len(t["inputs"]) * v)
train_input = t["inputs"][:split_idx] if inputs_dimension<=1 else [t["inputs"][:split_idx] for d in range(inputs_dimension)]
test_input = t["inputs"][split_idx:] if inputs_dimension<=1 else [t["inputs"][split_idx:] for d in range(inputs_dimension)]
train_target, test_target = t["target"][:split_idx], t["target"][split_idx:]
return train_input, test_input, train_target, test_target

def _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, validation_split, k_folds, tasks, cumul):
def _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, meta_tasks_per_epoch, inputs_dimension, validation_split, k_folds, tasks, cumul):
log_step = meta_epochs // 10 if meta_epochs > 10 else 1
optim_test=optimizer(learning_rate=alpha)
optim_test.build(model.trainable_variables)
@@ -69,7 +77,7 @@ def _MAML_compute(model, alpha, beta, optimizer, c_loss, f_loss, meta_epochs, me
model_copy.compile(loss=f_loss, optimizer=optim_train)
for _ in range(num_tasks_sampled):
t = random.choice(tasks)
train_input, test_input, train_target, test_target = _build_task(t, validation_split, k_folds)
train_input, test_input, train_target, test_target = _build_task(t, inputs_dimension, validation_split, k_folds)

# 1. Inner loop: Update the model copy on the current task
with tf.GradientTape(watch_accessed_variables=False) as train_tape:
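As a hedged sanity check of the reworked k-fold branch (the task dict and sizes below are invented), the randomly chosen fold becomes the held-out slice while the remaining folds are concatenated for training, which is what the new `train_input`/`test_input` lines compute in the single-input case.

```python
import numpy as np
import random

k_folds = 5
task = {"inputs": np.arange(100).reshape(100, 1),
        "target": np.arange(100).reshape(100, 1)}  # hypothetical task data

# Mirrors the k_folds branch of _build_task (inputs_dimension <= 1).
fold = random.randint(0, k_folds - 1)
fold_size = len(task["inputs"]) // k_folds
v_start = fold * fold_size
v_end = (fold + 1) * fold_size if fold < k_folds - 1 else len(task["inputs"])

train_input = np.concatenate((task["inputs"][:v_start], task["inputs"][v_end:]), axis=0)
test_input = task["inputs"][v_start:v_end]

print(train_input.shape, test_input.shape)  # (80, 1) (20, 1)
```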
