Dataframe Object is not callable

Problem Description

When I run it, it keeps telling me the dataframe object is not callable.

class OptionDataWebGleaner():

    def __init__(self):

        ticker = pd.read_csv('Yahoo_ticker_List.csv')['AUB.AX'].values
        stock = raw_input('Please give the ticker of your selected option?\n')

        if stock in ticker:

            self.stock = stock
        else:

            raise TypeError('Your option is not available here.')

        date_norm = raw_input('Please give your maturity date in the format of mm/dd/yyyy\n')

        maturity_date = datetime.strptime(date_norm, '%m/%d/%Y').date()

        self.maturity_date = maturity_date
        self.today = date.today()

        dates = ['1481846400', '1484870400', '1487289600']
        maturity_dates = [date(2016, 12, 16), date(2017, 1, 20), date(2017, 2, 17)]
        date_dict = {}

        for v in zip(dates, maturity_dates):
            date_dict[v[1]] = v[0]

        try:
            self.d = date_dict[self.maturity_date]
        except:
            print('Your maturity date is not available')

        option = raw_input('Please give the type of your option, either call or put\n')

        self.option_type = option + 's'

    @property
    def crawl_data(self):  # self #option_type: calls or puts. str

        stock = self.stock
        option_type = self.option_type
        maturity_date = self.maturity_date
        d = self.d

        chromedriver = "/Users/Miya/Downloads/chromedriver.exe"
        os.environ["webdriver.chrome.driver"] = chromedriver
        driver = webdriver.Chrome(chromedriver)
        today = self.today

        ## Get the url
        url = 'http://finance.yahoo.com/quote/' + stock + '/options?date=' + d
        ## Crawl data
        driver.get(url)
        html_source = driver.page_source
        ## Beautifulsoup
        soup = BeautifulSoup(html_source, 'html.parser')


        if soup.find('table', option_type) is not None:

            stock_price = [float(i.text) for i in soup.findAll('span', 'Fz(36px)')]
            title = [i.text for i in soup.find('table', option_type).find_all('th')]
            text = [i.text for i in soup.find('table', option_type).find_all('td')]
            rows = [row for row in soup.find('table', option_type).find_all("tr")]

            l_table = len(rows) - 1
            ## call/put data
            dictionary = {}
            dictionary['maturity_date'] = [maturity_date] * l_table
            dictionary['date'] = [today] * l_table
            dictionary['stock_price'] = stock_price * l_table

            for j in range(10):
                key = title[j]
                dictionary[key] = []
                for i in range(l_table):
                    dictionary[key].append(text[10 * i + j])

            ## write into dataframe

            dataframe = pd.DataFrame(dictionary)


        return dataframe

    def clean_data(self):

        dataframe = self.crawl_data()

        print('Remove unexpected symbols...')

        columns_to_set = ['Last Price', 'Open Interest', 'Strike', 'Volume', 'Implied Volatility']
        for i in columns_to_set:
            series = dataframe[i]
            series_new = []
            for j in series:
                j = str(j)
                j_new = ''.join(ch for ch in j if (ch != '%') and (ch != ','))
                series_new.append(j_new)
            dataframe[i] = series_new
        print('Change the data type...')

        ## change the dtype
        columns_to_change = ['Last Price', 'Open Interest', 'Strike', 'Volume', 'stock_price', 'Implied Volatility']

        for i in columns_to_change:
            dataframe_cleaned[i] = dataframe[i].astype(float)

        print("Remove missing values...")

        dataframe_cleaned = dataframe_cleaned.dropna()

        # print("Clean Outliers...")

        # dataframe = dataframe.loc[dataframe['Implied Volatility'] <= 2]

        return dataframe_cleaned

    def save_file(self):

        save_file = raw_input("Do you want to save the file into csv? Type Y for yes, N or no\n ")
        d = self.d
        stock = self.stock
        df_option = self.clean_data()

        if save_file == 'Y':
            csv_name = stock + d + '.csv'
            df_option.to_csv(csv_name)
            print("File Saved!")

    def viz(self):

        dataframe = self.clean_data()
        stock = self.stock
        time_to_maturity = []
        dataframe = dataframe.sort_values(by='Strike')
        ## grab dataframe, then relevant data
        for i, j in zip(dataframe.maturity_date, dataframe.date):
            time_to_maturity.append((i - j).days / 365)

        strike_price = dataframe['Strike']

        # generate pseudo-implied volatility by using strike price and time-to-maturity as parameters

        implied_vol = dataframe['Implied Volatility'].values

        strike_price, time_to_maturity = np.meshgrid(strike_price, time_to_maturity)

        fig = plot.figure(figsize=(10, 5))  ## a plot object
        ax = Axes3D(fig)  # create a 3D object/handle

        ##plot surface: array row/column stride(step size:2)
        ##plot surface: array row/column stride(step size:2)

        surf = ax.plot_surface(strike_price, time_to_maturity, implied_vol, rstride=2, cstride=2, cmap=cm.coolwarm,
                               linewidth=0.5, antialiased=False)

        # set x,y,a labels
        ax.set_xlabel('Strike Price')
        ax.set_ylabel('time to maturity')
        ax.set_zlabel('implied volatility%')
        plot.suptitle(stock)
        plot.show()

    def summary(self):

        dataframe = self.clean_data

        print(dataframe.describe())


OptionDataWebGleaner().viz()

Solution

The problem is the property decorator on crawl_data. This answer explains how the property decorator actually works, but basically: because of the decorator, self.crawl_data is the DataFrame returned by the method, not the method itself. So self.crawl_data() in the first line of clean_data is trying to call the DataFrame, not the function.

Here's an example:

>>> class Test(object):
...     @property
...     def example(self):
...             return 1
...
>>> t = Test()
>>> t.example
1
>>> t.example()
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: 'int' object is not callable

This question really could have done with the stack trace. It would have led us right to the line with the problematic call.
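
For completeness, here is a minimal, runnable sketch of the simplest fix, with the scraping logic replaced by a stand-in DataFrame (an assumption for illustration only): drop the @property decorator so crawl_data stays an ordinary method, and call it with parentheses everywhere. Alternatively, keep the decorator and consistently access self.crawl_data without parentheses; either convention works, as long as it is used consistently. Note that summary in the posted code has the mirror-image problem: it accesses self.clean_data without parentheses, so describe() would be invoked on a bound method rather than on a DataFrame.

import pandas as pd


class OptionDataWebGleaner(object):

    # No @property here: crawl_data stays a plain method.
    def crawl_data(self):
        # Stand-in for the real Selenium/BeautifulSoup scraping logic.
        return pd.DataFrame({'Strike': [10.0, 12.5], 'Volume': [100, 250]})

    def clean_data(self):
        dataframe = self.crawl_data()  # calling the method now works
        return dataframe.dropna()

    def summary(self):
        dataframe = self.clean_data()  # note the parentheses here as well
        print(dataframe.describe())


OptionDataWebGleaner().summary()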
