Triplet Loss in Keras with a TensorFlow backend

import tensorflow as tf
tf.set_random_seed(1)

from keras import backend as K  # was missing: K is used in build_model
from keras.models import Model
from keras.layers import Input, Lambda
from keras.optimizers import Adam

ALPHA = 0.2  # Triplet loss margin: required gap between neg_dist and pos_dist.


# Source: https://github.com/davidsandberg/facenet/blob/master/src/facenet.py
def triplet_loss(x):
    """Compute the FaceNet triplet loss over a batch of embeddings.

    Args:
        x: list/tuple of three tensors (anchor, positive, negative), each of
           shape (batch, embedding_dim) — assumed; confirm against the
           embedding network's output.

    Returns:
        A scalar tensor: mean over the batch of
        max(||a - p||^2 - ||a - n||^2 + ALPHA, 0).
    """
    anchor, positive, negative = x
    # Squared Euclidean distances, summed over the embedding dimension.
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    # Hinge: only penalize when the positive is not at least ALPHA closer.
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), ALPHA)
    loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
    return loss


# Builds an embedding for each example (i.e., positive, negative, anchor),
# then calculates the triplet loss between their embeddings, and finally
# applies an identity loss on the triplet loss value to minimize it in training.
def build_model(input_shape):
    """Build a three-input Keras model whose output is the triplet loss.

    Args:
        input_shape: shape of one example, passed to each Input layer and to
            create_embedding_network (defined elsewhere in this file).

    Returns:
        A compiled Model taking [anchor, positive, negative] and emitting the
        scalar triplet loss; train it against an all-zeros target.
    """
    # Standardizing the input shape order.
    K.set_image_dim_ordering('th')

    positive_example = Input(shape=input_shape)
    negative_example = Input(shape=input_shape)
    anchor_example = Input(shape=input_shape)

    # One shared network so all three examples use the same weights.
    embedding_network = create_embedding_network(input_shape)
    positive_embedding = embedding_network(positive_example)
    negative_embedding = embedding_network(negative_example)
    anchor_embedding = embedding_network(anchor_example)

    # Wrap triplet_loss in a Lambda layer: Model outputs must come from a
    # Keras Layer, and the functional merge(..., mode=callable) API used here
    # previously has been removed from Keras.
    loss = Lambda(triplet_loss, output_shape=(1,))(
        [anchor_embedding, positive_embedding, negative_embedding])

    model = Model(inputs=[anchor_example, positive_example, negative_example],
                  outputs=loss)
    # The layer output already IS the loss, so 'mean_absolute_error' against a
    # zero target simply minimizes the triplet loss itself.
    model.compile(loss='mean_absolute_error', optimizer=Adam())
    return model

# When fitting the model (i.e., model.fit()), use as input [anchor_example,
# positive_example, negative_example] in that order, and zeros as the output.
# The reason to use zero as the target is that you are trying to minimize the
# triplet loss as much as possible, and the minimum value of the loss is zero.

1 Response

Hi, I'm using your code as a pattern for mine, as I'm trying to implement triplet loss with Keras too. The main difference is that I'm using a Sequential model, so I cannot use "merge". My model looks like this:

def build_model(img_x, img_y):
    """Build a triplet-loss re-identification model.

    Args:
        img_x: input image height (first spatial dimension).
        img_y: input image width (second spatial dimension).

    Returns:
        A compiled Model taking [anchor, positive, negative] RGB images of
        shape (img_x, img_y, 3) and outputting the triplet loss value;
        train it against an all-zeros target.
    """
    from keras.layers import Lambda  # local import: only needed here

    input_shape = (img_x, img_y, 3)

    # Shared embedding network: applied to all three inputs so they use the
    # same weights.
    reid_model = Sequential()
    reid_model.add(Conv2D(32, kernel_size=(3, 3), strides=(1, 1),
                          activation='relu',
                          input_shape=input_shape))
    reid_model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    reid_model.add(Flatten())
    reid_model.add(Dense(512, activation='sigmoid'))

    # Use input_shape here instead of hard-coding (128, 254, 3), so the
    # Input layers always agree with the Conv2D's expected shape.
    anchor = Input(shape=input_shape)
    positive = Input(shape=input_shape)
    negative = Input(shape=input_shape)

    anchor_embed = reid_model(anchor)
    positive_embed = reid_model(positive)
    negative_embed = reid_model(negative)

    # A Model's output must be produced by a Keras Layer; calling plain
    # functions (distance / triplets_max) on tensors directly raises
    # "Output tensors to a Model must be the output of a Keras Layer".
    # Wrapping the whole computation in a Lambda layer fixes that.
    loss = Lambda(
        lambda embeds: triplets_max(distance(embeds[0], embeds[1]),
                                    distance(embeds[0], embeds[2]),
                                    0.05),
        output_shape=(1,))([anchor_embed, positive_embed, negative_embed])

    model = Model(inputs=[anchor, positive, negative], outputs=loss)
    model.compile(optimizer='Adam', loss='mean_absolute_error')
    return model

But I'm getting a ValueError: "Output tensors to a Model must be the output of a Keras `Layer`." Any suggestions?

Write a comment

You can use [html][/html], [css][/css], [php][/php] and more to embed the code. Urls are automatically hyperlinked. Line breaks and paragraphs are automatically generated.