Rで実装されたLDAモデルでtext2vecパッケージを使用していますが、各ドキュメントをトピックに割り当てる方法を考えています
BELOW HERE is my code:
library(stringr)
library(rword2vec)
library(wordVectors)
#install.packages("text2vec")
library(text2vec)
library(data.table)
library(magrittr)
# Normalise raw text for tokenization: lower-case everything, replace
# every non-LETTER character with a space (digits are removed too —
# the pattern keeps [:alpha:] only, not alphanumerics), then squeeze
# runs of whitespace down to a single space.
#
# @param x Character vector of raw documents.
# @return Character vector of the same length, cleaned.
prep_fun <- function(x) {
  lowered <- str_to_lower(x)
  letters_only <- str_replace_all(lowered, "[^[:alpha:]]", " ")
  str_replace_all(letters_only, "\\s+", " ")
}
# Pre-process the raw text once up front.
# NOTE(review): text2vec's bundled movie_review is a data.frame — if
# movie_review_train has the same shape, prep_fun should be applied to
# the text column (e.g. movie_review_train$review); confirm upstream.
movie_review_train = prep_fun(movie_review_train)

# Tokenize the first 1000 documents into word tokens.
# (The tolower() that used to sit in this chain was redundant:
# prep_fun already lower-cases via str_to_lower, so it was a no-op
# and has been removed.)
tokens = movie_review_train[1:1000] %>%
  word_tokenizer

# Reusable iterator over the token stream.
it = itoken(tokens, progressbar = FALSE)

# Full (unpruned) vocabulary over the corpus; printed for inspection.
v = create_vocabulary(it)
v

vectorizer = vocab_vectorizer(v)

# Time the construction of the initial document-term matrix
# (rows = documents, columns = vocabulary terms, raw counts).
t1 = Sys.time()
dtm_train = create_dtm(it, vectorizer)
print(difftime(Sys.time(), t1, units = 'sec'))
dim(dtm_train)
# A small hand-picked stopword list (first-person pronouns only);
# these terms are excluded when the vocabulary is rebuilt below.
stop_words = c("i", "me", "my", "myself", "we", "our", "ours", "ourselves")
t1 = Sys.time()
# Rebuild the vocabulary from the same iterator, this time dropping
# the stopwords. Note: this rebinds `v`, replacing the unpruned
# vocabulary created earlier.
v = create_vocabulary(it, stopwords = stop_words)
print(difftime(Sys.time(), t1, units = 'sec'))
# Prune rare and overly common terms:
#   term_count_min      — drop terms seen fewer than 10 times overall
#   doc_proportion_max  — drop terms appearing in > 50% of documents
#   doc_proportion_min  — drop terms appearing in < 0.1% of documents
pruned_vocab = prune_vocabulary(v,
term_count_min = 10,
doc_proportion_max = 0.5,
doc_proportion_min = 0.001)
# Rebind `vectorizer` to use the pruned vocabulary.
vectorizer = vocab_vectorizer(pruned_vocab)
# create dtm_train with new pruned vocabulary vectorizer
# (rebinds `dtm_train`; downstream code uses this pruned DTM).
t1 = Sys.time()
dtm_train = create_dtm(it, vectorizer)
print(difftime(Sys.time(), t1, units = 'sec'))
# L1-normalised version of the raw count DTM (each row scaled so its
# entries sum to 1). Kept for reference; not used by the TF-IDF step.
dtm_train_l1_norm = normalize(dtm_train, "l1")

tfidf = TfIdf$new()
# Fit the TF-IDF model on the training DTM and weight it in one step.
dtm_train_tfidf = fit_transform(dtm_train, tfidf)
# BUG FIX: the original code called transform(dtm_train_tfidf, tfidf),
# which applied the TF-IDF weighting a SECOND time to an already
# weighted matrix. transform() is meant for new data (e.g. a held-out
# test DTM); for the training data the fit_transform result is the
# weighted matrix, so reuse it directly.
dtm = dtm_train_tfidf
# NOTE(review): text2vec's LDA expects raw term counts rather than
# TF-IDF weights — consider feeding dtm_train (counts) to the LDA
# step instead; confirm against the text2vec topic-modeling vignette.
# NOTE(review): ntopics, alphaprior and deltaprior are not defined in
# this script chunk — they must be assigned before this point.
lda_model <- LDA$new(n_topics = ntopics,
                     doc_topic_prior = alphaprior,
                     topic_word_prior = deltaprior)

# BUG FIX: the model was constructed but never fitted, so
# get_top_words() was being called on an unfitted model.
# fit_transform() both fits the LDA and returns the document-topic
# distribution: one row per document, one column per topic, each row
# summing to 1.
doc_topic_distr = lda_model$fit_transform(dtm,
                                          n_iter = 1000,
                                          convergence_tol = 0.001)

# Top 10 terms for topics 1-5 (lambda = 0.3 favours topic-exclusive
# terms over globally frequent ones, per the LDAvis relevance metric).
lda_model$get_top_words(n = 10, topic_number = c(1:5), lambda = 0.3)

# Assign each document to its single most probable topic: the column
# index of the per-row maximum of the document-topic distribution.
# This is the document -> topic mapping the script was missing.
doc_topic_assignment = apply(doc_topic_distr, 1, which.max)
この後、各ドキュメントを関連トピックに割り当てたいと思います。トピックの下に用語のリストを取得していますが、マッピング方法はわかりません。