求问:下面这个程序为什么会造成死循环?如何修改才能使它正常运行?

# -*- coding: utf-8 -*-
import csv
import inspect
import math
import numpy as np
from sklearn import preprocessing
import multiprocessing
import sys


def max_min_normalization(data_list):
    """
    Normalize a list of numbers to [0, 1] with min-max scaling.

    x_new = (x - min) / (max - min)

    :param data_list: non-empty sequence of numbers; max must differ from
        min, otherwise ZeroDivisionError is raised (same as the original).
    :return: list of floats rounded to 3 decimal places
    """
    # Hoist min()/max() out of the loop: the original recomputed
    # min(data_list) on every iteration, making the loop O(n^2).
    min_value = min(data_list)
    max_min_interval = max(data_list) - min_value
    normalized_list = []
    for data in data_list:
        new_data = (float(data) - min_value) / max_min_interval
        normalized_list.append(round(new_data, 3))

    return normalized_list


def mean_normalization(data_list):
    """
    Mean-normalize a list of numbers.

    x_new = (x - mean) / (max - min); the result is not necessarily
    inside [0, 1].

    :param data_list: non-empty sequence of numbers (max must differ
        from min)
    :return: list of floats rounded to 3 decimal places
    """
    mean = sum(data_list) / len(data_list)
    max_min_interval = max(data_list) - min(data_list)
    return [round((float(value) - mean) / max_min_interval, 3)
            for value in data_list]


def zscores_normalization(data_list):
    """
    Standardize a list of numbers with the z-score method.

    x_new = (x - mean) / std, where std is the population standard
    deviation (variance divided by N, not N-1).

    :param data_list: non-empty sequence of numbers with non-zero std
    :return: list of z-scores rounded to 3 decimal places
    """
    count = len(data_list)
    mean = sum(data_list, 0.0) / count
    squared_diffs = [(float(value) - mean) ** 2 for value in data_list]
    std_value = math.sqrt(sum(squared_diffs) / count)
    return [round((value - mean) / std_value, 3) for value in data_list]


def max_min_normalization_using_numpy(data_list):
    """
    Min-max normalize a list to [0, 1], using numpy for the extrema.

    :param data_list: non-empty sequence of numbers; max must differ
        from min.
    :return: list of values rounded to 3 decimal places
    """
    # Renamed locals: the original bound `max` and `min`, shadowing the
    # builtins for the rest of the function body.
    max_value = np.max(data_list)
    min_value = np.min(data_list)
    interval = max_value - min_value
    normalized_list = []
    for data in data_list:
        new_data = (float(data) - min_value) / interval
        normalized_list.append(round(new_data, 3))

    return normalized_list


def zscores_normalization_using_numpy(data_list):
    """
    Standardize a list with z-scores, using numpy's mean and
    (population) standard deviation.

    :param data_list: non-empty sequence of numbers with non-zero std
    :return: list of z-scores rounded to 3 decimal places
    """
    mean_value = np.mean(data_list)
    std_value = np.std(data_list)
    return [round((value - mean_value) / std_value, 3)
            for value in data_list]


def normalize_data_using_sk(data_list):
    """
    Min-max normalize a list with sklearn's built-in minmax_scale.

    :param data_list: non-empty sequence of numbers
    :return: 1-D numpy array of values rounded to 3 decimal places
    """
    # minmax_scale works column-wise by default, so reshape the data
    # into a single row and scale along axis=1.
    as_row = np.asarray(data_list, 'float').reshape(1, -1)
    scaled = preprocessing.minmax_scale(as_row, axis=1)
    rounded = np.round(scaled, 3)
    return rounded[0, :]


def colunmn1(self=None):
    """
    Read the 'SepalLengthCm' column from Iris.csv and run every
    normalization function defined in this module on it, printing each
    result.

    :param self: unused; kept (now with a default value) so existing
        callers that pass an argument keep working.
    """
    print('colunmn1 as follow')
    with open('Iris.csv', 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        column1 = [row['SepalLengthCm'] for row in reader]  ##
    data_list1 = [float(x) for x in column1]
    # Iterate a snapshot of globals(). Crucially, skip this function
    # itself: the original called every global function — including
    # colunmn1 — so it re-invoked itself forever. That self-recursion
    # is the "infinite loop" the poster observed.
    for key1, func1 in list(globals().items()):
        if inspect.isfunction(func1) and func1 is not colunmn1:
            res1 = func1(data_list1)
            print('%s:\n%s' % (key1, res1))


colunmn1('a')

运行环境:Python 3.7

问题就出在最后一个函数上,column1函数的目的是要读取表格数据,供其他函数调用处理,直接将表格数据做成列表不就行了,不必要通过globals()函数来遍历所有全局变量及其值。建议将最后一个函数修改,不要写成函数,将语句并入main主函数里,获取列表后,分别调用其他函数进行数据处理。

。。。你这明显要依托表格Iris.csv的吧,抛开数据单谈逻辑行不通啊哥们儿

def colunmn1(self): 这个函数的参数 self 是干嘛的?函数体里完全没有用到,为什么还需要传参呢?