How to Use Pretrained Word Vectors for Text Classification

Enough chatter, here's the code!!! The flow: train (or load) a Word2Vec model, build a word-to-index table plus an embedding matrix from it, convert the corpus into padded index sequences, and feed them to a Keras LSTM whose Embedding layer is initialized with the pretrained vectors and frozen.

# Assumed imports (not shown in the original post): gensim 3.x for Word2Vec, Keras for the LSTM, numpy for arrays
import numpy as np
import gensim
from gensim.models import Word2Vec
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dropout, Dense

def train_W2V(w2vCorpus, size=100):
    # size/iter are the gensim 3.x parameter names (vector_size/epochs in gensim 4)
    w2vModel = Word2Vec(sentences=w2vCorpus, hs=0, negative=5, min_count=5, window=8, iter=1, size=size)
    w2vModel.save(inPath + 'w2vModel.model')  # inPath is assumed to be defined elsewhere
    return w2vModel

def load_W2V(W2V_path, loader_mySelf=1):
    if loader_mySelf:
        print("use my w2vModel")
        w2vModel = Word2Vec.load(W2V_path + 'w2vModel.model')  # use the word vectors trained above
    else:  # load the pretrained Tencent word vectors instead
        print("use other w2vModel")
        w2vModel = gensim.models.KeyedVectors.load_word2vec_format(W2V_path + 'w2v_embedding_tengxun', binary=False)
    return w2vModel

def make_word2idx_embedMatrix(w2vModel):
    word2idx = {"_PAD": 0}  # index 0 is reserved for padding / unknown words
    vocab_list = [(w, w2vModel.wv[w]) for w in w2vModel.wv.vocab]
    embeddings_matrix = np.zeros((len(vocab_list) + 1, w2vModel.vector_size))

    for i in range(len(vocab_list)):
        word = vocab_list[i][0]
        word2idx[word] = i + 1                       # word -> row index (offset by 1 for the padding row)
        embeddings_matrix[i + 1] = vocab_list[i][1]  # row index -> pretrained vector

    return word2idx, embeddings_matrix
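
A quick way to convince yourself the two structures line up: the row of embeddings_matrix that word2idx points to must hold that word's pretrained vector. A hypothetical check (the word '中国' is only an example; any in-vocabulary word works):

idx = word2idx['中国']  # hypothetical in-vocabulary word
assert np.allclose(embeddings_matrix[idx], w2vModel.wv['中国'])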
    
def make_deepLearn_data(w2vCorpus, word2idx):
    X_train = []
    for sen in w2vCorpus:
        wordList = []
        for w in sen:
            if w in word2idx:
                wordList.append(word2idx[w])
            else:
                wordList.append(0)  # out-of-vocabulary words fall back to the padding index
        X_train.append(np.array(wordList))

    X_train = np.array(sequence.pad_sequences(X_train, maxlen=TEXT_MAXLEN))  # must be an np.array

    return X_train
    
def Lstm_model():  # careful with the name: calling it LSTM() would shadow the Keras layer and cause nasty errors!!
    model = Sequential()
    model.add(Embedding(input_dim=len(embeddings_matrix),      # vocabulary size (+1 for the padding row)
                        output_dim=len(embeddings_matrix[0]),  # embedding dimension
                        input_length=TEXT_MAXLEN,
                        weights=[embeddings_matrix],  # initialize directly with the pretrained word vectors
                        trainable=False               # do not fine-tune the word vectors
                       ))
    model.add(LSTM(units=20, return_sequences=False))  # units: dimension of the LSTM output
    model.add(Dropout(0.5))
    model.add(Dense(units=1, activation="sigmoid"))  # fully connected output layer
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
    
if __name__ == '__main__':
    df_data_ = df_data[0: 10000]  # load the raw data (df_data is assumed to exist already)

    w2vCorpus = [sen.split() for sen in df_data_.分析字段]  # build the Word2Vec corpus
    w2vModel = train_W2V(w2vCorpus, size=100)  # train the Word2Vec model

    w2vModel = load_W2V(inPath, loader_mySelf=0)  # load a w2vModel
    word2idx, embeddings_matrix = make_word2idx_embedMatrix(w2vModel)  # build word2idx and the embedding matrix

    X_train = make_deepLearn_data(w2vCorpus, word2idx)  # turn the corpus into padded index sequences
    y_train = np.array(df_data_.特征类型)  # must be an np.array

    model = Lstm_model()
    model.fit(X_train[0: -2000], y_train[0: -2000], epochs=2, batch_size=10, verbose=1)
    score = model.evaluate(X_train[-2000:], y_train[-2000:])
    print(score)
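
To classify new text with the trained model, it has to go through exactly the same preprocessing (word2idx lookup plus padding). A minimal sketch under that assumption; the sentences below are made-up, whitespace-tokenized examples, not part of the original post:

# hypothetical inference step, reusing the functions defined above
new_sentences = [sen.split() for sen in ["今天 天气 很好", "这 部 电影 很 差"]]
X_new = make_deepLearn_data(new_sentences, word2idx)
probs = model.predict(X_new)             # sigmoid outputs in [0, 1]
labels = (probs > 0.5).astype("int32")   # threshold at 0.5 for the binary label
print(labels)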

 

Original article: https://www.cnblogs.com/liguangchuang/p/10074075.html
