# Embedding — Reuters topic-classification models
# Build the reverse lookup (integer index -> word) for the Reuters
# vocabulary, so encoded articles can be decoded back into text.
word_index = reuters.get_word_index()
index_to_word = {index: word for word, index in word_index.items()}
from tensorflow.keras.utils import to_categorical

# One-hot encode the 46 Reuters topic labels for categorical cross-entropy.
# NOTE(review): y_train/y_test and X_train_seq/X_test_seq are only created
# further down this script (cells exported out of execution order) — confirm
# the intended run order before executing top to bottom.
y_train_en = to_categorical(y_train)
y_test_en = to_categorical(y_test)

# Add a trailing feature axis so each padded sequence becomes
# (timesteps=145, features=1), as SimpleRNN expects without an Embedding.
# Fixed: use -1 for the sample axis instead of hard-coding the dataset
# sizes (8982 train / 2246 test), so this works for any split size.
X_train_seq = X_train_seq.reshape(-1, 145, 1)
X_test_seq = X_test_seq.reshape(-1, 145, 1)
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense , SimpleRNN, Embedding

# Baseline classifier: a SimpleRNN fed the raw (padded) integer sequences
# as one continuous feature per timestep, then a small dense head over the
# 46 Reuters topics.
model1 = Sequential([
    SimpleRNN(32, input_shape=(145, 1), activation='tanh'),
    Dense(64, activation='relu'),
    Dense(46, activation='softmax'),
])
model1.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)
model1.fit(X_train_seq, y_train_en, epochs=50)
# Load the Reuters newswire data, keeping only the max_feature most common
# words, then pad/truncate every article to a fixed 145 tokens.
# NOTE(review): max_feature is defined elsewhere in the notebook —
# presumably 2000, to match the Embedding layers below; confirm.
(X_train, y_train), (X_test, y_test) = reuters.load_data(num_words=max_feature)

from tensorflow.keras.preprocessing import sequence

max_len = 145
X_train_seq = sequence.pad_sequences(X_train, maxlen=max_len)
X_test_seq = sequence.pad_sequences(X_test, maxlen=max_len)
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense , SimpleRNN, Embedding

# Same SimpleRNN classifier, but with a learned 100-dim word embedding in
# front (vocabulary capped at 2000 — assumes max_feature == 2000; verify).
model2 = Sequential([
    Embedding(2000, 100),
    SimpleRNN(32, activation='tanh'),
    Dense(64, activation='relu'),
    Dense(46, activation='softmax'),
])
model2.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)
model2.fit(X_train_seq, y_train_en, epochs=20)
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense , LSTM, Embedding

# Variant of model2 with the SimpleRNN swapped for an LSTM cell.
model3 = Sequential([
    Embedding(2000, 100),
    LSTM(32, activation='tanh'),
    Dense(64, activation='relu'),
    Dense(46, activation='softmax'),
])
model3.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)
model3.fit(X_train_seq, y_train_en, epochs=20)
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense , LSTM, Embedding,Conv1D, MaxPooling1D

# CNN + LSTM hybrid: a Conv1D/MaxPooling front end extracts local n-gram
# features from the embeddings before the LSTM summarizes the sequence.
model4 = Sequential([
    Embedding(2000, 100),
    Conv1D(filters=32, kernel_size=5, activation='relu'),
    MaxPooling1D(pool_size=4),
    LSTM(32, activation='tanh'),
    Dense(64, activation='relu'),
    Dense(46, activation='softmax'),
])
model4.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)
model4.fit(X_train_seq, y_train_en, epochs=20)
# Text generation (텍스트 생성)
# Three-line Korean corpus for next-word prediction.
# Fixed: the original embedded literal "\n" escapes *in addition to* the
# real newlines of the triple-quoted string, so splitting on '\n' later
# produced empty lines (harmless but redundant). Also removed the bare
# no-op inspection expressions (t.word_docs / t.word_index) left over
# from the notebook.
text = """경마장에 있는 말이 뛰고 있다
그의 말이 법이다
가는 말이 고와야 오는 말이 곱다"""

from tensorflow.keras.preprocessing.text import Tokenizer

t = Tokenizer()
t.fit_on_texts([text])  # word indices start at 1; 0 is reserved for padding
# Build n-gram training samples: for each line, every prefix of length >= 2
# becomes one sample (leading words -> next word).
# Fixed: the inner loop variable was named `sequence`, shadowing the
# tensorflow.keras.preprocessing `sequence` module imported earlier in this
# script. Also removed the trailing no-op `sequences` expression.
sequences = []
for line in text.split('\n'):
    encoded = t.texts_to_sequences([line])[0]
    for i in range(1, len(encoded)):
        sequences.append(encoded[:i + 1])
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Left-pad every n-gram to a common length so the last column is always the
# target (next) word.
max_len = 6
sequences = pad_sequences(sequences, maxlen=max_len)

import numpy as np
# Fixed: pad_sequences already returns an ndarray, so the extra
# np.array(...) copy was removed (numpy import kept for later use).

X = sequences[:, :-1]  # all but the last token: model input
y = sequences[:, -1]   # last token: next-word target

from tensorflow.keras.utils import to_categorical

# Fixed: derive the class count from the fitted tokenizer (index 0 is
# reserved for padding, hence +1) instead of hard-coding 12.
vocab_size = len(t.word_index) + 1
y_en = to_categorical(y, num_classes=vocab_size)
from tensorflow.keras.layers import Embedding, Dense, SimpleRNN
from tensorflow.keras import Sequential

# Next-word model: 10-dim embeddings over a 12-entry vocabulary, inputs of
# 5 tokens (max_len - 1), softmax over the 12 possible word indices.
# NOTE(review): reuses the name `model1`, shadowing the Reuters classifier
# defined earlier in this script.
model1 = Sequential([
    Embedding(12, 10, input_length=5),
    SimpleRNN(32),
    Dense(12, activation='softmax'),
])
model1.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)
model1.fit(X, y_en, epochs=200)
def make_sentence(model, t, current_word, n):
    """Greedily generate ``n`` words following the seed ``current_word``.

    Parameters:
        model: trained next-word model (softmax over word indices).
        t: fitted Tokenizer whose ``word_index`` maps word -> integer index.
        current_word: seed string; each predicted word is appended to it
            before the next prediction.
        n: number of words to generate.

    Returns the seed followed by the ``n`` generated words.
    Relies on the module-level ``max_len`` and ``pad_sequences`` defined
    earlier in this script.
    """
    init_word = current_word
    generated = []
    for _ in range(n):
        encoded = t.texts_to_sequences([current_word])[0]
        encoded = pad_sequences([encoded], maxlen=max_len - 1)
        # Fixed: take a plain Python int from the (1,)-shaped argmax array
        # instead of comparing an int against a numpy array; also removed
        # the leftover debug print() calls.
        result = int(model.predict(encoded, verbose=0).argmax(axis=-1)[0])
        # Reverse-lookup the predicted index in the tokenizer vocabulary.
        # NOTE(review): if the prediction is 0 (padding) no entry matches
        # and `word` keeps its last iterated value — same as the original.
        for word, index in t.word_index.items():
            if index == result:
                break
        current_word = current_word + " " + word
        generated.append(word)
    return init_word + "".join(" " + w for w in generated)
# Generate and print 4 words following the seed "경마장에".
print(make_sentence(model1,t,"경마장에",4))