from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM
from keras.preprocessing.sequence import pad_sequences

# Load the IMDB reviews, keeping only the 1,000 most frequent words
# (the nb_words argument was renamed num_words in Keras 2)
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=1000)

# Pad (or truncate) every review to a fixed length of 1,000 tokens
X_train = pad_sequences(X_train, maxlen=1000)
X_test = pad_sequences(X_test, maxlen=1000)
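
# Build the network: a 64-dim embedding over the 1,000-word vocabulary,
# an LSTM that condenses each 1,000-step review into a 32-dim state,
# then a small dense head with dropout for regularization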
model = Sequential()
model.add(Embedding(1000, 64, input_length=1000))
# output_dim / inner_activation are Keras 1 names; Keras 2 uses units / recurrent_activation
model.add(LSTM(32, activation='sigmoid', recurrent_activation='hard_sigmoid'))
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(8, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
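
# Binary cross-entropy pairs with the single sigmoid output for two-class sentiment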
model.compile(loss="binary_crossentropy", optimizer="adagrad", metrics=["accuracy"])
model.fit(X_train, y_train, batch_size=500, epochs=100)  # nb_epoch was renamed epochs in Keras 2
model.evaluate(X_test, y_test, batch_size=1000)
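
# Inspect a few raw predictions next to the true labels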
pred = model.predict(X_test, batch_size=20000)
print(pred[0], y_test[0])
print(pred[1], y_test[1])
print(pred[2], y_test[2])
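
# predict() returns sigmoid probabilities in [0, 1]; threshold at 0.5,
# e.g. (pred > 0.5).astype(int), to recover hard 0/1 class labels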