I am trying to run a basic character classification in Google Colab on a TPU, and I get the following error:
InvalidArgumentError: Unsupported data type for TPU: double, caused by output cond_8/Merge:0
I don't know what the problem is, since I use float32 when creating the numpy arrays. I also don't know what cond_8/Merge:0 refers to. The input file I am loading is a JSON array representing a number of 28x28 grayscale images:
[{"label":25,"data":[[[1],[.56720000]...],...]}]
I have tried commenting out all layers except the first input layer, but the problem still occurs! My code:
import os, re, math, json, shutil, pprint
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.python.platform import tf_logging
from google.colab import drive
print("Tensorflow version " + tf.__version__)
with open('/tmp/encoded.json') as json_file:
    data = json.load(json_file)
print("Got data")
images_data = list(map(lambda row: row["data"],data))
label_data = list(map(lambda row: row["label"],data))
print("mapped data")
images_data_tensor = np.asarray(images_data, dtype=np.float32)
label_data_tensor = np.asarray(label_data, dtype=np.float32)
print("converted to tensors")
BATCH_SIZE = 128
N = 24
# This model trains to 99.4% sometimes 99.5% accuracy in 10 epochs (with a batch size of 32)
def create_model():
    l = tf.keras.layers
    model = tf.keras.Sequential(
      [
        #l.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1)),
        l.Conv2D(input_shape=(28,28,1,), filters=6, kernel_size=3, padding='same', use_bias=False), # no bias necessary before batch norm
        l.BatchNormalization(scale=False, center=True), # no batch norm scaling necessary before "relu"
        l.Activation('relu'), # activation after batch norm
        l.Conv2D(filters=12, kernel_size=6, padding='same', use_bias=False, strides=2),
        l.BatchNormalization(scale=False, center=True),
        l.Activation('relu'),
        l.Conv2D(filters=24, kernel_size=6, padding='same', use_bias=False, strides=2),
        l.BatchNormalization(scale=False, center=True),
        l.Activation('relu'),
        l.Flatten(),
        l.Dense(200, use_bias=False),
        l.BatchNormalization(scale=False, center=True),
        l.Activation('relu'),
        l.Dropout(0.5), # Dropout on dense layer only
        l.Dense(10, activation='softmax')
      ])
    return model
# set up learning rate decay
lr_decay = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 0.0001 + 0.02 * math.pow(0.5, 1+epoch), verbose=True)
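# Schedule: starts at 0.0101 and roughly halves every epoch, floored at 0.0001.
# Note: the lambda returns a plain Python float; whether that can end up as a
# float64 ("double") node on the TPU is only a guess on my part, but casting
# with np.float32(...) would be a cheap way to rule it out.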
EPOCHS = 10
tpu = None
# Default strategy for GPU/CPU. Note that tensorflow-gpu will need to be installed for GPU to work
strategy = tf.distribute.MirroredStrategy()
try: # TPU detection
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # Picks up a connected TPU on Google's Colab, ML Engine, Kubernetes and Deep Learning VMs accessed through the 'ctpu up' utility
    #tpu = tf.distribute.cluster_resolver.TPUClusterResolver('MY_TPU_NAME') # If auto-detection does not work, you can pass the name of the TPU explicitly (tip: on a VM created with "ctpu up" the TPU has the same name as the VM)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
    print('Training on CPU')
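# As far as I understand, TPUs support float32/bfloat16 but not float64
# ("double"), which is the type the error message is complaining about.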
with strategy.scope():
    trained_model = create_model()
    trained_model.compile(optimizer='adam', # learning rate will be set by LearningRateScheduler
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
    # print model layers
    trained_model.summary()
history = trained_model.fit(x=images_data_tensor,y=label_data_tensor, epochs=EPOCHS, callbacks=[lr_decay])
print(history.history.keys())
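Is there a way to find out which op actually produces the double? The only debugging idea I have is to dump every float64 output in the default graph after building the model (a sketch, assuming TF 1.x graph mode; I am not sure it also catches ops inserted by the TPU rewrite):

for op in tf.compat.v1.get_default_graph().get_operations():
    for output in op.outputs:
        if output.dtype == tf.float64:
            print(output.name)  # hoping to see cond_8/Merge here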