21. Prediction Latency

This example benchmarks the prediction latency of several scikit-learn regressors. It measures latency in both atomic mode (one sample at a time) and bulk mode (the whole test set at once), the influence of the number of features on prediction time, and the raw prediction throughput of each estimator.

from collections import defaultdict

import time

import gc

import numpy as np

import matplotlib.pyplot as plt

from sklearn.preprocessing import StandardScaler

from sklearn.model_selection import train_test_split

from sklearn.datasets import make_regression

from sklearn.ensemble import RandomForestRegressor

from sklearn.linear_model import Ridge

from sklearn.linear_model import SGDRegressor

from sklearn.svm import SVR

from sklearn.utils import shuffle

plt.rcParams['font.sans-serif'] = ['SimHei']

plt.rcParams['axes.unicode_minus'] = False

def _not_in_sphinx():

    # Hack to detect whether we are running by the sphinx builder

    return '__file__' in globals()

def atomic_benchmark_estimator(estimator, X_test, verbose=False):

    """Measure runtime prediction of each instance."""

    n_instances = X_test.shape[0]

    runtimes = np.zeros(n_instances, dtype=float)

    for i in range(n_instances):

        instance = X_test[[i], :]

        start = time.time()

        estimator.predict(instance)

        runtimes[i] = time.time() - start

    if verbose:

        print("atomic_benchmark runtimes:", min(runtimes), np.percentile(

            runtimes, 50), max(runtimes))

    return runtimes

def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):

    """Measure runtime prediction of the whole input."""

    n_instances = X_test.shape[0]

    runtimes = np.zeros(n_bulk_repeats, dtype=float)

    for i in range(n_bulk_repeats):

        start = time.time()

        estimator.predict(X_test)

        runtimes[i] = time.time() - start

    runtimes = runtimes / n_instances  # average per-instance time for each bulk run

    if verbose:

        print("bulk_benchmark runtimes:", min(runtimes), np.percentile(

            runtimes, 50), max(runtimes))

    return runtimes

def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
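    """Measure runtimes of prediction in both atomic and bulk mode."""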


    atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)

    bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,

                                            verbose)

    return atomic_runtimes, bulk_runtimes

def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
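    """Generate a regression dataset and standardize the features and target."""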


    if verbose:

        print("generating dataset...")

    X, y, coef = make_regression(n_samples=n_train + n_test,

                                n_features=n_features, noise=noise, coef=True)

    random_seed = 13

    X_train, X_test, y_train, y_test = train_test_split(

        X, y, train_size=n_train, test_size=n_test, random_state=random_seed)

    X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)

    X_scaler = StandardScaler()

    X_train = X_scaler.fit_transform(X_train)

    X_test = X_scaler.transform(X_test)

    y_scaler = StandardScaler()

    y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]

    y_test = y_scaler.transform(y_test[:, None])[:, 0]

    gc.collect()

    if verbose:

        print("ok")

    return X_train, y_train, X_test, y_test

def boxplot_runtimes(runtimes, pred_type, configuration):
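    """Plot boxplots of the prediction runtimes (in microseconds) for each estimator."""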


    fig, ax1 = plt.subplots(figsize=(10, 6))

    bp = plt.boxplot(runtimes)

    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],

                                  estimator_conf['complexity_computer'](

                                      estimator_conf['instance']),

                                  estimator_conf['complexity_label']) for

                estimator_conf in configuration['estimators']]

    plt.setp(ax1, xticklabels=cls_infos)

    plt.setp(bp['boxes'], color='black')

    plt.setp(bp['whiskers'], color='black')

    plt.setp(bp['fliers'], color='red', marker='+')

    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',

                  alpha=0.5)

    ax1.set_axisbelow(True)

    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (

        pred_type.capitalize(),

        configuration['n_features']))

    ax1.set_ylabel('Prediction Time (us)')

    plt.show()

def benchmark(configuration):
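    """Fit each configured estimator and plot its atomic and bulk prediction latencies."""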


    X_train, y_train, X_test, y_test = generate_dataset(

        configuration['n_train'], configuration['n_test'],

        configuration['n_features'])

    stats = {}

    for estimator_conf in configuration['estimators']:

        print("Benchmarking", estimator_conf['instance'])

        estimator_conf['instance'].fit(X_train, y_train)

        gc.collect()

        a, b = benchmark_estimator(estimator_conf['instance'], X_test)

        stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}

    cls_names = [estimator_conf['name'] for estimator_conf in configuration[

        'estimators']]

    runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]

    boxplot_runtimes(runtimes, 'atomic', configuration)

    runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]

    boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],

                    configuration)

def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
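    """Estimate the influence of the number of features on bulk prediction time."""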


    percentiles = defaultdict(defaultdict)

    for n in n_features:

        print("benchmarking with %d features" % n)

        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)

        for cls_name, estimator in estimators.items():

            estimator.fit(X_train, y_train)

            gc.collect()

            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)

            percentiles[cls_name][n] = 1e6 * np.percentile(runtimes,

                                                          percentile)

    return percentiles

def plot_n_features_influence(percentiles, percentile):

    fig, ax1 = plt.subplots(figsize=(10, 6))

    colors = ['r', 'g', 'b']

    for i, cls_name in enumerate(percentiles.keys()):

        x = np.array(sorted([n for n in percentiles[cls_name].keys()]))

        y = np.array([percentiles[cls_name][n] for n in x])

        plt.plot(x, y, color=colors[i])

    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',

                  alpha=0.5)

    ax1.set_axisbelow(True)

    ax1.set_title('Evolution of Prediction Time with #Features')

    ax1.set_xlabel('#Features')

    ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)

    plt.show()

def benchmark_throughputs(configuration, duration_secs=0.1):
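    """Measure how many single-instance predictions each estimator serves per second."""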


    X_train, y_train, X_test, y_test = generate_dataset(

        configuration['n_train'], configuration['n_test'],

        configuration['n_features'])

    throughputs = dict()

    for estimator_config in configuration['estimators']:

        estimator_config['instance'].fit(X_train, y_train)

        start_time = time.time()

        n_predictions = 0

        while (time.time() - start_time) < duration_secs:

            estimator_config['instance'].predict(X_test[[0]])

            n_predictions += 1

        throughputs[estimator_config['name']] = n_predictions / duration_secs

    return throughputs

def plot_benchmark_throughput(throughputs, configuration):

    fig, ax = plt.subplots(figsize=(10, 6))

    colors = ['r', 'g', 'b']

    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],

                                  estimator_conf['complexity_computer'](

                                      estimator_conf['instance']),

                                  estimator_conf['complexity_label']) for

                estimator_conf in configuration['estimators']]

    cls_values = [throughputs[estimator_conf['name']] for estimator_conf in

                  configuration['estimators']]

    plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)

    ax.set_xticks(range(len(throughputs)))  # bars are centered on integer positions

    ax.set_xticklabels(cls_infos, fontsize=10)

    ymax = max(cls_values) * 1.2

    ax.set_ylim((0, ymax))

    ax.set_ylabel('Throughput (predictions/sec)')

    ax.set_title('Prediction Throughput for different estimators (%d '

                 'features)' % configuration['n_features'])

    plt.show()

start_time = time.time()

# Benchmark bulk/atomic prediction speed of various regressors

configuration = {

    'n_train': int(1e3),

    'n_test': int(1e2),

    'n_features': int(1e2),

    'estimators': [

        {'name': 'Linear Model',

        'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,

                                  l1_ratio=0.25, tol=1e-4),

        'complexity_label': 'non-zero coefficients',

        'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},

        {'name': 'RandomForest',

        'instance': RandomForestRegressor(),

        'complexity_label': 'estimators',

        'complexity_computer': lambda clf: clf.n_estimators},

        {'name': 'SVR',

        'instance': SVR(kernel='rbf'),

        'complexity_label': 'support vectors',

        'complexity_computer': lambda clf: len(clf.support_vectors_)},

    ]

}

benchmark(configuration)

# Benchmark the influence of n_features on prediction speed

percentile = 90

percentiles = n_feature_influence({'ridge': Ridge()},

                                  configuration['n_train'],

                                  configuration['n_test'],

                                  [100, 250, 500], percentile)

plot_n_features_influence(percentiles, percentile)

# Benchmark throughput

throughputs = benchmark_throughputs(configuration)

plot_benchmark_throughput(throughputs, configuration)

stop_time = time.time()

print("example run in %.2fs" % (stop_time - start_time))

