关键错误:列中没有 [Int64Index...] dtype='int64] [英] Key Error: None of [Int64Index...] dtype='int64] are in the columns
问题描述
我正在尝试使用 np.random.shuffle() 方法调整索引,但我不断收到一个我不明白的错误.如果有人能帮我解决这个问题,我将不胜感激.谢谢!
我在开始创建 raw_csv_data 变量时尝试使用 delimiter=',' 和 delim_whitespace=0,因为我认为这是另一个问题的解决方案,但它一直抛出相同的错误
将熊猫导入为 pd将 numpy 导入为 np从 sklearn.preprocessing 导入 StandardScaler#%%raw_csv_data= pd.read_csv('缺勤数据.csv')打印(raw_csv_data)#%%df= raw_csv_data.copy()打印(显示(df))#%%pd.options.display.max_columns=无pd.options.display.max_rows=无打印(显示(df))#%%打印(df.info())#%%df=df.drop(['ID'],axis=1)#%%打印(显示(df.head()))#%%#我们的目标是看看谁更有可能缺席.让我们定义#我们的目标来自我们的因变量,缺勤时间(以小时为单位)打印(df ['缺勤时间以小时为单位'])打印(df ['缺勤时间以小时为单位'].median())#%%目标 = np.where(df['缺勤时间以小时为单位']>df['缺勤时间小时'].median(),1,0)#%%打印(目标)#%%df['过度缺勤']=目标#%%打印(df.head())#%%#让我们把日和月的值分开看看有没有相关性#with 缺席的周/月之间打印(类型(df['日期'][0]))#%%df['Date']= pd.to_datetime(df['Date'], format='%d/%m/%Y')#%%打印(df ['日期'])打印(类型(df['日期'][0]))#%%#提取月份值打印(df['日期'][0].月)#%%list_months=[]打印(list_months)#%%打印(df.shape)#%%对于 i 在范围内(df.shape[0]):list_months.append(df['Date'][i].month)#%%打印(list_months)#%%打印(len(list_months))#%%#让我们为 df 创建一个月份值列df['月值']= list_months#%%打印(df.head())#%%#现在让我们从日期中提取星期几df['日期'][699].weekday()#%%def date_to_weekday(date_value):返回 date_value.weekday()#%%df['星期几']= df['Date'].apply(date_to_weekday)#%%打印(df.head())#%%df= df.drop(['日期'], 轴=1)#%%打印(df.columns.values)#%%reordered_columns= ['缺席原因', '月份价值','日期周"、交通费用"、上班距离"、年龄"、每日平均工作负荷"、身体质量指数"、教育"、'孩子们',宠物",以小时为单位的缺勤时间",过度缺勤"]#%%df=df[reordered_columns]打印(df.head())#%%#第一个检查点df_date_mod=df.copy()#%%打印(df_date_mod)#%%#让我们标准化我们的输入,忽略原因和教育列#因为它们由单独的分类标准标记,而不是在数字上打印(df_date_mod.columns.values)#%%unscaled_inputs= df_date_mod.loc[:, ['Month Value','Day of the周"、交通费"、上班距离"、年龄"、日常工作"加载平均"、身体质量指数"、儿童"、宠物"、缺勤时间"小时']]#%%打印(显示(未缩放的输入))#%%缺勤_标尺=标准标尺()#%%缺勤_scaler.fit(unscaled_inputs)#%%scaled_inputs=缺勤_scaler.transform(unscaled_inputs)#%%打印(显示(缩放输入))#%%打印(scaled_inputs.shape)#%%scaled_inputs= pd.DataFrame(scaled_inputs, columns=['Month Value','Day每周"、交通费用"、工作距离"、年龄"、每日"平均工作负荷"、体重指数"、儿童"、宠物"、缺勤"时间小时'])打印(显示(缩放输入))#%%df_date_mod= df_date_mod.drop(['月份值','日期周"、交通费"、上班距离"、年龄"、日常工作"平均负荷'、'身体质量指数'、'儿童'、'宠物'、'缺勤时间小时'],轴=1)打印(显示(df_date_mod))#%%df_date_mod=pd.concat([df_date_mod,scaled_inputs],axis=1)打印(显示(df_date_mod))#%%df_date_mod=df_date_mod[reordered_columns]打印(显示(df_date_mod.head()))#%%#检查点df_date_scale_mod=df_date_mod.copy()打印(显示(df_date_scale_mod.head()))#%%#分析缺席原因分类打印(df_date_scale_mod['缺席原因'])#%%打印(df_date_scale_mod['缺席原因'].min())打印(df_date_scale_mod['缺席原因'].max())#%%打印(df_date_scale_mod['缺席原因'].unique())#%%打印(len(df_date_scale_mod ['缺席原因'].unique()))#%%打印(排序(df ['缺席原因'].unique()))#%%reason_columns= pd.get_dummies(df['缺席原因'])打印(原因列)#%%reason_columns['check']= reason_columns.sum(axis=1)打印(原因列)#%%打印(reason_columns['check'].sum(axis=0))#%%打印(reason_columns['check'].unique())#%%reason_columns=reason_columns.drop(['check'],axis=1)打印(原因列)#%%reason_columns=pd.get_dummies(df_date_scale_mod['缺席原因'],drop_first=真)打印(原因列)#%%打印(df_date_scale_mod.columns.values)#%%打印(reason_columns.columns.values)#%%df_date_scale_mod= df_date_scale_mod.drop(['缺席原因'],轴=1)打印(df_date_scale_mod)#%%reason_type_1= reason_columns.loc[:, 1:14].max(axis=1)reason_type_2= reason_columns.loc[:, 15:17].max(axis=1)reason_type_3= reason_columns.loc[:, 18:21].max(axis=1)reason_type_4= reason_columns.loc[:, 22:].max(axis=1)#%%打印(reason_type_1)打印(reason_type_2)打印(reason_type_3)打印(reason_type_4)#%%打印(df_date_scale_mod.head())#%%df_date_scale_mod= pd.concat([df_date_scale_mod,reason_type_1、reason_type_2、reason_type_3、reason_type_4]、axis=1)打印(df_date_scale_mod.head())#%%打印(df_date_scale_mod.columns.values)#%%column_names= ['月份价值','星期几','运输费用',工作距离"、年龄"、平均每日工作负荷"、体重指数"、教育"、儿童"、宠物"、旷工时间"、过度缺勤"、原因_1"、原因_2"、原因_3"、'原因_4']df_date_scale_mod.columns= column_names打印(df_date_scale_mod.head())#%%column_names_reordered = ['Reason_1', 'Reason_2', 'Reason_3','Reason_4','Month Value','星期几','运输费用',工作距离"、年龄"、平均每日工作负荷"、体重指数"、教育"、儿童"、宠物"、旷工时间"、'过度缺勤']df_date_scale_mod=df_date_scale_mod[column_names_reordered]打印(显示(df_date_scale_mod.head()))#%%#检查点df_date_scale_mod_reas= df_date_scale_mod.copy()打印(df_date_scale_mod_reas.head())#%%#现在来看教育栏目打印(df_date_scale_mod_reas['教育'].unique())#这向我们表明,教育是根据级别从 1-4 分级的#完成#%%打印(df_date_scale_mod_reas['教育'].value_counts())#绝大多数工人都受过高中教育,而这#rest有更高的学位#%%#我们将创建我们的虚拟变量作为高中和高等教育df_date_scale_mod_reas['教育']=df_date_scale_mod_reas['教育'].map({1:0, 2:1, 3:1, 4:1})#%%打印(df_date_scale_mod_reas['教育'].unique())#%%打印(df_date_scale_mod_reas['教育'].value_counts())#%%#检查点df_preprocessed = df_date_scale_mod_reas.copy()打印(显示(df_preprocessed.head()))#%%#%%#从目标拆分输入scaled_inputs_all= df_preprocessed.loc[:,'Reason_1':'缺勤时间在小时']打印(显示(scaled_inputs_all.head()))打印(scaled_inputs_all.shape)#%%target_all= df_preprocessed.loc[:,'过度缺勤']打印(显示(targets_all.head()))打印(targets_all.shape)#%%#Shuffle 输入和目标shuffled_indices= np.arange(scaled_inputs_all.shape[0])np.random.shuffle(shuffled_indices)shuffled_inputs= scaled_inputs_all[shuffled_indices]shuffled_targets=targets_all[shuffled_indices]
<块引用>
这是我尝试调整索引时不断收到的错误:
KeyError Traceback(最后一次调用)在1 shuffled_indices= np.arange(scaled_inputs_all.shape[0])2 np.random.shuffle(shuffled_indices)---->3 shuffled_inputs= scaled_inputs_all[shuffled_indices]4 shuffled_targets=targets_all[shuffled_indices]
~Anaconda3libsite-packagespandascoreframe.py 中getitem(self, key) 2932 key = list(key) 2933 indexer = self.loc._convert_to_indexer(key,axis=1,-> 2934 raise_missing=True) 2935 2936 # take() 不接受布尔索引器
~Anaconda3libsite-packagespandascoreindexing.py 中_convert_to_indexer(self, obj, axis, is_setter, raise_missing) 1352 kwargs = {'raise_missing': 如果 is_setter 否则为真 1353
raise_missing}-> 1354 返回 self._get_listlike_indexer(obj,axis, **kwargs)[1] 1355 else: 1356 try:
~Anaconda3libsite-packagespandascoreindexing.py 中_get_listlike_indexer(self, key,axis, raise_missing) 1159 self._validate_read_indexer(keyarr, indexer, 1160
o._get_axis_number(axis),-> 1161 raise_missing=raise_missing) 1162 返回keyarr,索引器
第1163章
~Anaconda3libsite-packagespandascoreindexing.py 中_validate_read_indexer(自我,键,索引器,轴,raise_missing)1244引发KeyError(1245
u"[{key}] 中没有一个在 [{axis}]".format(-> 1246 key=key,axis=self.obj._get_axis_name(axis))) 1247 1248 #我们(暂时)允许 .loc 丢失一些键,除了在
KeyError: "[Int64Index([560, 320, 405, 141, 154, 370, 656,26, 444, 307, ... 429, 542, 676, 588, 315,284, 293, 607, 197, 250], dtype='int64', length=700)] 是在 [列]"
您使用 loc
创建了您的 scaled_inputs_all
DataFrame函数,所以它很可能不包含连续的索引.
另一方面,您创建了 shuffled_indices
作为 shuffle来自一系列连续的数字.
记住 scaled_inputs_all[shuffled_indices]
获取行scaled_inputs_all
的 索引值 等于shuffled_indices
的元素.
也许你应该写:
scaled_inputs_all.iloc[shuffled_indices]
请注意,iloc
提供基于整数位置的索引,而不管索引值,即您需要的值.
I'm trying to shuffle my indices using the np.random.shuffle() method, but I keep getting an error that I don't understand. I'd appreciate it if someone could help me puzzle this out. Thank you!
I've tried to use the delimiter=',' and delim_whitespace=0 when I made my raw_csv_data variable at the beginning, as I saw that as the solution of another problem, but it kept throwing the same error
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
#%%
raw_csv_data= pd.read_csv('Absenteeism-data.csv')
print(raw_csv_data)
#%%
df= raw_csv_data.copy()
print(display(df))
#%%
pd.options.display.max_columns=None
pd.options.display.max_rows=None
print(display(df))
#%%
print(df.info())
#%%
df=df.drop(['ID'], axis=1)
#%%
print(display(df.head()))
#%%
#Our goal is to see who is more likely to be absent. Let's define
#our targets from our dependent variable, Absenteeism Time in Hours
print(df['Absenteeism Time in Hours'])
print(df['Absenteeism Time in Hours'].median())
#%%
targets= np.where(df['Absenteeism Time in Hours']>df['Absenteeism Time
in Hours'].median(),1,0)
#%%
print(targets)
#%%
df['Excessive Absenteeism']= targets
#%%
print(df.head())
#%%
#Let's Separate the Day and Month Values to see if there is
correlation
#between Day of week/month with absence
print(type(df['Date'][0]))
#%%
df['Date']= pd.to_datetime(df['Date'], format='%d/%m/%Y')
#%%
print(df['Date'])
print(type(df['Date'][0]))
#%%
#Extracting the Month Value
print(df['Date'][0].month)
#%%
list_months=[]
print(list_months)
#%%
print(df.shape)
#%%
for i in range(df.shape[0]):
list_months.append(df['Date'][i].month)
#%%
print(list_months)
#%%
print(len(list_months))
#%%
#Let's Create a Month Value Column for df
df['Month Value']= list_months
#%%
print(df.head())
#%%
#Now let's extract the day of the week from date
df['Date'][699].weekday()
#%%
def date_to_weekday(date_value):
return date_value.weekday()
#%%
df['Day of the Week']= df['Date'].apply(date_to_weekday)
#%%
print(df.head())
#%%
df= df.drop(['Date'], axis=1)
#%%
print(df.columns.values)
#%%
reordered_columns= ['Reason for Absence', 'Month Value','Day of the
Week','Transportation Expense', 'Distance to Work', 'Age',
'Daily Work Load Average', 'Body Mass Index', 'Education',
'Children',
'Pets',
'Absenteeism Time in Hours', 'Excessive Absenteeism']
#%%
df=df[reordered_columns]
print(df.head())
#%%
#First Checkpoint
df_date_mod= df.copy()
#%%
print(df_date_mod)
#%%
#Let's Standardize our inputs, ignoring the Reasons and Education
Columns
#Because they are labelled by a separate categorical criteria, not
numerically
print(df_date_mod.columns.values)
#%%
unscaled_inputs= df_date_mod.loc[:, ['Month Value','Day of the
Week','Transportation Expense','Distance to Work','Age','Daily Work
Load
Average','Body Mass Index','Children','Pets','Absenteeism Time in
Hours']]
#%%
print(display(unscaled_inputs))
#%%
absenteeism_scaler= StandardScaler()
#%%
absenteeism_scaler.fit(unscaled_inputs)
#%%
scaled_inputs= absenteeism_scaler.transform(unscaled_inputs)
#%%
print(display(scaled_inputs))
#%%
print(scaled_inputs.shape)
#%%
scaled_inputs= pd.DataFrame(scaled_inputs, columns=['Month Value','Day
of the Week','Transportation Expense','Distance to Work','Age','Daily
Work Load Average','Body Mass Index','Children','Pets','Absenteeism
Time
in Hours'])
print(display(scaled_inputs))
#%%
df_date_mod= df_date_mod.drop(['Month Value','Day of the
Week','Transportation Expense','Distance to Work','Age','Daily Work
Load Average','Body Mass Index','Children','Pets','Absenteeism Time in
Hours'], axis=1)
print(display(df_date_mod))
#%%
df_date_mod=pd.concat([df_date_mod,scaled_inputs], axis=1)
print(display(df_date_mod))
#%%
df_date_mod= df_date_mod[reordered_columns]
print(display(df_date_mod.head()))
#%%
#Checkpoint
df_date_scale_mod= df_date_mod.copy()
print(display(df_date_scale_mod.head()))
#%%
#Let's Analyze the Reason for Absence Category
print(df_date_scale_mod['Reason for Absence'])
#%%
print(df_date_scale_mod['Reason for Absence'].min())
print(df_date_scale_mod['Reason for Absence'].max())
#%%
print(df_date_scale_mod['Reason for Absence'].unique())
#%%
print(len(df_date_scale_mod['Reason for Absence'].unique()))
#%%
print(sorted(df['Reason for Absence'].unique()))
#%%
reason_columns= pd.get_dummies(df['Reason for Absence'])
print(reason_columns)
#%%
reason_columns['check']= reason_columns.sum(axis=1)
print(reason_columns)
#%%
print(reason_columns['check'].sum(axis=0))
#%%
print(reason_columns['check'].unique())
#%%
reason_columns=reason_columns.drop(['check'], axis=1)
print(reason_columns)
#%%
reason_columns=pd.get_dummies(df_date_scale_mod['Reason for Absence'],
drop_first=True)
print(reason_columns)
#%%
print(df_date_scale_mod.columns.values)
#%%
print(reason_columns.columns.values)
#%%
df_date_scale_mod= df_date_scale_mod.drop(['Reason for Absence'],
axis=1)
print(df_date_scale_mod)
#%%
reason_type_1= reason_columns.loc[:, 1:14].max(axis=1)
reason_type_2= reason_columns.loc[:, 15:17].max(axis=1)
reason_type_3= reason_columns.loc[:, 18:21].max(axis=1)
reason_type_4= reason_columns.loc[:, 22:].max(axis=1)
#%%
print(reason_type_1)
print(reason_type_2)
print(reason_type_3)
print(reason_type_4)
#%%
print(df_date_scale_mod.head())
#%%
df_date_scale_mod= pd.concat([df_date_scale_mod,
reason_type_1,reason_type_2, reason_type_3, reason_type_4], axis=1)
print(df_date_scale_mod.head())
#%%
print(df_date_scale_mod.columns.values)
#%%
column_names= ['Month Value','Day of the Week','Transportation
Expense',
'Distance to Work','Age','Daily Work Load Average','Body Mass Index',
'Education','Children','Pets','Absenteeism Time in Hours',
'Excessive Absenteeism', 'Reason_1', 'Reason_2', 'Reason_3',
'Reason_4']
df_date_scale_mod.columns= column_names
print(df_date_scale_mod.head())
#%%
column_names_reordered= ['Reason_1', 'Reason_2', 'Reason_3',
'Reason_4','Month Value','Day of the Week','Transportation Expense',
'Distance to Work','Age','Daily Work Load Average','Body Mass Index',
'Education','Children','Pets','Absenteeism Time in Hours',
'Excessive Absenteeism']
df_date_scale_mod=df_date_scale_mod[column_names_reordered]
print(display(df_date_scale_mod.head()))
#%%
#Checkpoint
df_date_scale_mod_reas= df_date_scale_mod.copy()
print(df_date_scale_mod_reas.head())
#%%
#Let's Look at the Education column now
print(df_date_scale_mod_reas['Education'].unique())
#This shows us that education is rated from 1-4 based on level
#of completion
#%%
print(df_date_scale_mod_reas['Education'].value_counts())
#The overwhelming majority of workers are highschool educated, while
the
#rest have higher degrees
#%%
#We'll create our dummy variables as highschool and higher education
df_date_scale_mod_reas['Education']=
df_date_scale_mod_reas['Education'].map({1:0, 2:1, 3:1, 4:1})
#%%
print(df_date_scale_mod_reas['Education'].unique())
#%%
print(df_date_scale_mod_reas['Education'].value_counts())
#%%
#Checkpoint
df_preprocessed= df_date_scale_mod_reas.copy()
print(display(df_preprocessed.head()))
#%%
#%%
#Split Inputs from targets
scaled_inputs_all= df_preprocessed.loc[:,'Reason_1':'Absenteeism Time
in
Hours']
print(display(scaled_inputs_all.head()))
print(scaled_inputs_all.shape)
#%%
targets_all= df_preprocessed.loc[:,'Excessive Absenteeism']
print(display(targets_all.head()))
print(targets_all.shape)
#%%
#Shuffle Inputs and targets
shuffled_indices= np.arange(scaled_inputs_all.shape[0])
np.random.shuffle(shuffled_indices)
shuffled_inputs= scaled_inputs_all[shuffled_indices]
shuffled_targets= targets_all[shuffled_indices]
This is the error I keep getting when I try to shuffle my indices:
KeyError Traceback (most recent call last) in 1 shuffled_indices= np.arange(scaled_inputs_all.shape[0]) 2 np.random.shuffle(shuffled_indices) ----> 3 shuffled_inputs= scaled_inputs_all[shuffled_indices] 4 shuffled_targets= targets_all[shuffled_indices]
~Anaconda3libsite-packagespandascoreframe.py in getitem(self, key) 2932 key = list(key) 2933 indexer = self.loc._convert_to_indexer(key, axis=1, -> 2934 raise_missing=True) 2935 2936 # take() does not accept boolean indexers
~Anaconda3libsite-packagespandascoreindexing.py in _convert_to_indexer(self, obj, axis, is_setter, raise_missing) 1352 kwargs = {'raise_missing': True if is_setter else 1353
raise_missing} -> 1354 return self._get_listlike_indexer(obj, axis, **kwargs)[1] 1355 else: 1356 try:~Anaconda3libsite-packagespandascoreindexing.py in _get_listlike_indexer(self, key, axis, raise_missing) 1159 self._validate_read_indexer(keyarr, indexer, 1160
o._get_axis_number(axis), -> 1161 raise_missing=raise_missing) 1162 return keyarr, indexer
1163~Anaconda3libsite-packagespandascoreindexing.py in _validate_read_indexer(self, key, indexer, axis, raise_missing) 1244 raise KeyError( 1245
u"None of [{key}] are in the [{axis}]".format( -> 1246 key=key, axis=self.obj._get_axis_name(axis))) 1247 1248 # We (temporarily) allow for some missing keys with .loc, except inKeyError: "None of [Int64Index([560, 320, 405, 141, 154, 370, 656, 26, 444, 307, ... 429, 542, 676, 588, 315, 284, 293, 607, 197, 250], dtype='int64', length=700)] are in the [columns]"
You created your scaled_inputs_all
DataFrame using loc
function, so it most likely contains no consecutive indices.
On the other hand, you created shuffled_indices
as a shuffle
from just a range of consecutive numbers.
Remember that scaled_inputs_all[shuffled_indices]
gets rows
of scaled_inputs_all
which have index values equal to
elements of shuffled_indices
.
Maybe you should write:
scaled_inputs_all.iloc[shuffled_indices]
Note that iloc
provides integer-location based indexing, regardless of
index values, i.e. just what you need.
这篇关于关键错误:列中没有 [Int64Index...] dtype='int64]的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!