0

我是 tensorflow2 的新手,使用 tensorflow2.3.1,cpu 版本。

我以子类化方式定义模型,在显示模型结构时,遇到错误“tensorflow.python.framework.errors_impl.InvalidArgumentError: Cannot convert a Tensor of dtype resource to a NumPy array”,指向以下BST_DSSM.build_model 中的行“self.item_sequence_embeddings = tf.nn.embedding_lookup(”

我浏览了类似的问题,但找不到满意的解决方案。任何帮助将不胜感激 :)

下面是我的代码。

import tensorflow as tf


class MultiHeadAttention(tf.keras.layers.Layer):
    """ def multi head attention layer

    q, k, v multiplied by Wq, Wk, Wv respectively -> q', k', v'
    q' * k' -> w, w / sqrt(q'.shape[1]) -> w'
    w' * v' -> z, z * Wz -> z'
    z' add v (residual), then goes through LRelu, do a LN at last
    """

    def __init__(
            self,
            scope_name,
            num_units=8,
            num_heads=1,
            embed_dim=8,
            has_residual=True,
            dropout_keep_prob=1.0):
        super(MultiHeadAttention, self).__init__()
        assert num_units % num_heads == 0
        assert scope_name in ["user", "item"]
        self.num_heads = num_heads
        self.num_units = num_units
        self.embed_dim = embed_dim
        self.dropout_keep_prob = dropout_keep_prob

        self.Wq = tf.keras.layers.Dense(
            units=self.num_units, activation=tf.nn.leaky_relu, name=f"{scope_name}_Wq")
        self.Wk = tf.keras.layers.Dense(
            units=self.num_units, activation=tf.nn.leaky_relu, name=f"{scope_name}_Wk")
        self.Wv = tf.keras.layers.Dense(
            units=self.num_units, activation=tf.nn.leaky_relu, name=f"{scope_name}_Wv")

        self.has_residual = has_residual
        self.Wz = tf.keras.layers.Dense(embed_dim)

    def call(self, queries, keys_, values):
        """

        :param queries: of shape [batch_size, max_length, emb_dim]
        :param keys_:  of shape [batch_size, max_length, emb_dim]
        :param values: of shape [batch_size, max_length, emb_dim]
        :return:
        """
        assert values.get_shape().as_list()[-1] == self.embed_dim
        assert queries.get_shape().as_list()[-1] == self.embed_dim
        assert keys_.get_shape().as_list()[-1] == self.embed_dim
        # Linear projections
        Q = self.Wq(queries)
        K = self.Wk(keys_)
        V = self.Wv(values)

        # Split and concat
        Q_ = tf.concat(tf.split(Q, self.num_heads, axis=2), axis=0)
        K_ = tf.concat(tf.split(K, self.num_heads, axis=2), axis=0)
        V_ = tf.concat(tf.split(V, self.num_heads, axis=2), axis=0)

        # Multiplication
        weights = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))

        # Scale
        weights = weights / (K_.get_shape().as_list()[-1] ** 0.5)

        # convert to prob vector
        weights = tf.nn.softmax(weights)

        # Dropouts
        if 0 < self.dropout_keep_prob < 1:
            weights = tf.keras.layers.AlphaDropout(
                rate=1 - self.dropout_keep_prob)(weights)

        # Weighted sum
        # [batch_size * num_heads, max_length, num_units / num_heads]
        outputs = tf.matmul(weights, V_)

        # Restore shape to [batch_size, max_length, num_units]
        z = tf.concat(tf.split(outputs, self.num_heads, axis=0), axis=2)

        # Restore shape to [batch_size, max_length, embed_dim]
        z = self.Wz(z)

        # Residual connection
        if self.has_residual:
            z += values

        z = tf.nn.leaky_relu(z)

        # Normalize
        z = tf.keras.layers.LayerNormalization(
            beta_initializer="zeros", gamma_initializer="ones")(z)

        return z


class BST_DSSM(tf.keras.Model):
    """define BST+DSSM model stucture
    """
    def __init__(self, model_dir,
                 item_embedding=None, user_embedding=None,
                 embedding_size=8,
                 vocab_size=1000,
                 max_length_item=15, max_length_user=6,
                 epoch=10, batch_size=256, blocks=2,
                 learning_rate=0.001, optimizer_type="adam",
                 batch_norm=0, batch_norm_decay=0.995,
                 verbose=False, random_seed=2019,
                 l2_reg=0.0, has_residual=True):
        """
        initial model related parms and tensors
        """
        super(BST_DSSM, self).__init__()
        # denote as K, size of the feature embedding
        self.embedding_size = embedding_size

        self.l2_reg = l2_reg

        self.epoch = epoch
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.optimizer_type = optimizer_type
        self.optimizer = None

        self.blocks = blocks
        self.batch_norm = batch_norm
        self.batch_norm_decay = batch_norm_decay

        self.verbose = verbose
        self.random_seed = random_seed
        self.model_dir = model_dir

        # self._init_graph()
        self.vocab_size = vocab_size
        self.max_length_item = max_length_item
        self.max_length_user = max_length_user
        self.has_residual = has_residual
        self.model = None

        self.item_embedding = item_embedding
        self.user_embedding = user_embedding

        self.mha_user = MultiHeadAttention("user", num_units=embedding_size)
        self.mha_item = MultiHeadAttention("item", num_units=embedding_size)

    def _get_item_embedding_matrix(self):
        if self.item_embedding is None:
            std = 0.1
            minval = -std
            maxval = std
            emb_matrix = tf.Variable(
                tf.random.uniform(
                    [self.vocab_size, self.embedding_size],
                    minval, maxval,
                    seed=self.random_seed,
                    dtype=tf.float32),
                name="item_embedding")
            self.item_embedding = emb_matrix

    def _get_user_embedding_matrix(self):
        if self.user_embedding is None:
            std = 0.1
            minval = -std
            maxval = std
            emb_matrix = tf.Variable(
                tf.random.uniform(
                    [self.vocab_size, self.embedding_size],
                    minval, maxval,
                    seed=self.random_seed,
                    dtype=tf.float32),
                name="user_embedding")
            self.user_embedding = emb_matrix

    def build_model(self):
        # initialize lut
        self._get_item_embedding_matrix()
        self._get_user_embedding_matrix()

        item_inputs = tf.keras.Input(
            shape=(
                self.max_length_item
            ),
            dtype=tf.int32,
            name="item_sequence_idx")
        user_inputs = tf.keras.Input(
            shape=(
                self.max_length_user
            ),
            dtype=tf.int32,
            name="user_sequence_idx")

        # user and item use different lut, similarly to DSSM
        self.item_sequence_embeddings = tf.nn.embedding_lookup(
            self.item_embedding, item_inputs, name="item_sequence_embeddings")
        self.video_sequence_embeddings = tf.nn.embedding_lookup(
            self.user_embedding, user_inputs, name="video_sequence_embeddings")

        # self attn part
        for i in range(self.blocks):
            self.item_sequence_embeddings = self.mha_item(
                queries=self.item_sequence_embeddings,
                keys=self.item_sequence_embeddings,
                values=self.item_sequence_embeddings)

            self.video_sequence_embeddings = self.mha_user(
                queries=self.video_sequence_embeddings,
                keys=self.video_sequence_embeddings,
                values=self.video_sequence_embeddings)

        # max pooling
        self.item_sequence_embeddings = tf.nn.max_pool(
            self.item_sequence_embeddings,
            [1, self.max_length_item, 1],
            [1 for _ in range(len(self.item_sequence_embeddings.shape))],
            padding="VALID")
        self.video_sequence_embeddings = tf.nn.max_pool(
            self.video_sequence_embeddings,
            [1, self.max_length_user, 1],
            [1 for _ in range(len(self.video_sequence_embeddings.shape))],
            padding="VALID")

        # cosine similarity
        self.item_sequence_embeddings = tf.nn.l2_normalize(
            self.item_sequence_embeddings, axis=2)
        self.video_sequence_embeddings = tf.nn.l2_normalize(
            self.video_sequence_embeddings, axis=2)

        outputs = tf.matmul(
            self.item_sequence_embeddings,
            tf.transpose(self.video_sequence_embeddings, [0, 2, 1]))
        outputs = tf.reshape(outputs, [-1, 1])

        # optimizer
        if self.optimizer_type == "adam":
            self.optimizer = tf.keras.optimizers.Adam(
                learning_rate=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        elif self.optimizer_type == "adagrad":
            self.optimizer = tf.keras.optimizers.Adagrad(
                learning_rate=self.learning_rate,
                initial_accumulator_value=1e-8)
        elif self.optimizer_type == "gd":
            self.optimizer = tf.keras.optimizers.SGD(
                learning_rate=self.learning_rate)
        elif self.optimizer_type == "momentum":
            self.optimizer = tf.keras.optimizers.SGD(
                learning_rate=self.learning_rate, momentum=0.95)

        self.model = tf.keras.Model(
            inputs={
                "item_sequence_idx": item_inputs,
                "user_sequence_idx": user_inputs
            },
            outputs=outputs)

        self.model.compile(
            optimizer=self.optimizer,
            loss=self.loss_fn,
            metrics=[
                tf.keras.metrics.AUC(),
                tf.keras.metrics.binary_accuracy()])
4

1 回答 1

0

虽然我没有弄清楚为什么会出现这样的错误,但我已经通过定义一个调用方法来构建我的模型,代码如下

from conf_loader import (
    emb_dim, n_layer,
    item_max_len, user_max_len,
    batch_size, lr, l2_reg,
    vocab_size
)


class BST_DSSM(tf.keras.Model):
    """define BST+DSSM model stucture
    """

    def __init__(self,
                 item_embedding=None, user_embedding=None,
                 emb_dim=emb_dim,
                 vocab_size=vocab_size,
                 item_max_len=item_max_len, user_max_len=user_max_len,
                 epoch=10, batch_size=batch_size, n_layers=n_layer,
                 learning_rate=lr, optimizer_type="adam",
                 random_seed=2019,
                 l2_reg=l2_reg, has_residual=True):
        """
        initial model related parms and tensors
        """
        super(BST_DSSM, self).__init__()
        self.emb_dim = emb_dim

        self.l2_reg = l2_reg

        self.epoch = epoch
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.optimizer_type = optimizer_type

        self.blocks = n_layers

        self.random_seed = random_seed

        self.vocab_size = vocab_size
        self.item_max_len = item_max_len
        self.user_max_len = user_max_len
        self.has_residual = has_residual

        self.item_embedding = item_embedding
        self.user_embedding = user_embedding

        self.mha_user = MultiHeadAttention(scope_name="user", embed_dim=self.emb_dim)
        self.mha_item = MultiHeadAttention(scope_name="item", embed_dim=self.emb_dim)

        # optimizer
        if self.optimizer_type == "adam":
            self.optimizer = tf.keras.optimizers.Adam(
                learning_rate=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        elif self.optimizer_type == "adagrad":
            self.optimizer = tf.keras.optimizers.Adagrad(
                learning_rate=self.learning_rate,
                initial_accumulator_value=1e-8)
        elif self.optimizer_type == "gd":
            self.optimizer = tf.keras.optimizers.SGD(
                learning_rate=self.learning_rate)
        elif self.optimizer_type == "momentum":
            self.optimizer = tf.keras.optimizers.SGD(
                learning_rate=self.learning_rate, momentum=0.95)

        self.user_embedding = tf.keras.layers.Embedding(input_dim=self.vocab_size, output_dim=self.emb_dim)
        self.item_embedding = tf.keras.layers.Embedding(input_dim=self.vocab_size, output_dim=self.emb_dim)

    @tf.function
    def call(self, inputs, training=True):
        # multiple inputs
        item_inputs = inputs[0]
        user_inputs = inputs[1]

        item_sequence_embeddings = self.item_embedding(item_inputs)
        user_sequence_embeddings = self.user_embedding(user_inputs)

        # [batch_size, max_length, 16]
        for i in range(self.blocks):
            item_sequence_embeddings = self.mha_item(item_sequence_embeddings)
            user_sequence_embeddings = self.mha_user(user_sequence_embeddings)

        # [batch_size, 1, 16]
        item_outputs_max = tf.nn.max_pool(
            item_sequence_embeddings,
            [1, self.item_max_len, 1],
            [1 for _ in range(len(item_sequence_embeddings.shape))],
            padding="VALID")
        user_outputs_max = tf.nn.max_pool(
            user_sequence_embeddings,
            [1, self.user_max_len, 1],
            [1 for _ in range(len(user_sequence_embeddings.shape))],
            padding="VALID")

        # L2 normalize to get cosine similarity
        item_normalized = tf.nn.l2_normalize(
            item_outputs_max, axis=2)
        user_normalized = tf.nn.l2_normalize(
            user_outputs_max, axis=2)
        outputs = tf.matmul(
            item_normalized,
            user_normalized,
            transpose_b=True)
        return tf.reshape(outputs, [-1, 1])

    def loss_fn(self, target, output):
        cross_entropy = tf.keras.backend.binary_crossentropy(
            target, output, from_logits=False
        )
        if self.l2_reg > 0:
            _regularizer = tf.keras.regularizers.l2(self.l2_reg)
            cross_entropy += _regularizer(self.user_embedding)
            cross_entropy += _regularizer(self.item_embedding)
        return cross_entropy


def debug():
    x_train = [
        np.random.randint(low=0, high=20, size=(5, item_max_len)),
        np.random.randint(low=0, high=20, size=(5, user_max_len))]

    y_train = np.random.randint(low=0, high=2, size=5).astype(dtype=float)

    model = BST_DSSM()
    model.compile(
                optimizer=model.optimizer,
                loss=model.loss_fn
            )
    model.fit(x_train, y_train, epochs=n_epoch)
    model.summary()

于 2020-12-24T03:33:25.570 回答