
I'm trying to find the frequency distribution over a series of tweets, but the distribution is computed for each tweet individually rather than over all of the tweets together. How can I fix this?

import tweepy
from tweepy import OAuthHandler
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk import FreqDist
from nltk.corpus import stopwords

consumer_key = 'x'
consumer_secret = 'x'
access_token = 'x'
access_secret = 'x'

auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)

api = tweepy.API(auth)

for tweet in tweepy.Cursor(api.user_timeline,
        "johnnywalker",
        result_type = "recent",
        count = 50,
        include_entities = False,
        exclude_replies = True,
        include_rts = False,
        trim_user = True,
        lang = "en").items():

        croy = word_tokenize(tweet.text)
        ensw = stopwords.words('english')
        filterArr = [word for word in croy if word not in ensw]
        filterArr = [word for word in croy if len(word) > 7]
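        # fdist is rebuilt here on every iteration, so it only ever
        # reflects the tokens of the current tweet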
        fdist = FreqDist(filterArr)
        fdist.most_common(50)

1 Answer


You are computing a frequency distribution for each individual tweet. If you want the distribution over a series of tweets, collect the tokens inside the loop and compute the distribution once, outside the loop. (Note that your two list comprehensions also overwrite each other, which discards the stopword filtering; they are combined into one below.)

tweet_tokenized = [] 
for tweet in tweepy.Cursor(api.user_timeline,
        "johnnywalker",
        result_type = "recent",
        count = 50,
        include_entities = False,
        exclude_replies = True,
        include_rts = False,
        trim_user = True,
        lang = "en").items():

        croy = word_tokenize(tweet.text)
        ensw = stopwords.words('english')
        # combine both conditions in one comprehension; assigning two
        # separate comprehensions would throw away the stopword filtering
        filterArr = [word for word in croy if word not in ensw and len(word) > 7]
        tweet_tokenized.extend(filterArr)
fdist = FreqDist(tweet_tokenized)
print(fdist.most_common(50))
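
As an alternative, FreqDist subclasses collections.Counter, so you can also update a single distribution incrementally inside the loop instead of collecting every token first. A minimal sketch, assuming the same Cursor arguments as above:

fdist = FreqDist()
ensw = stopwords.words('english')  # load the stopword list once, outside the loop
for tweet in tweepy.Cursor(api.user_timeline,
        "johnnywalker",
        result_type = "recent",
        count = 50,
        include_entities = False,
        exclude_replies = True,
        include_rts = False,
        trim_user = True,
        lang = "en").items():
        # count this tweet's filtered tokens directly;
        # Counter.update accepts any iterable of hashable items
        fdist.update(word for word in word_tokenize(tweet.text)
                     if word not in ensw and len(word) > 7)
print(fdist.most_common(50))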
answered 2020-04-20T05:18:19.467