1. The torch-cam toolkit: CAM heatmaps
# Set up the environment
pip install numpy pandas matplotlib requests tqdm opencv-python pillow scanpy anndata scipy stlearn scikit-learn glob2 -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10.0/index.html
# Download the Chinese font file
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/SimHei.ttf
# Download the ImageNet-1000 class metadata
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/meta_data/imagenet_class_index.csv
# Create directories
import os
# folder for test images
os.mkdir('test_img')
# folder for output files
os.mkdir('output')
# folder for trained model weights
# must not be named "checkpoints"
os.mkdir('checkpoint')
# Download the sample model weights
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/checkpoints/fruit30_pytorch_20220814.pth -P checkpoint
# Download the mapping dictionaries between class names and index IDs
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/fruit30/labels_to_idx.npy
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/fruit30/idx_to_labels.npy
# Download test images into the test_img folder
# Border Collie, source: https://www.woopets.fr/assets/races/000/066/big-portrait/border-collie.jpg
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/border-collie.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/cat_dog.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/0818/room_video.mp4 -P test_img
# Strawberry image, source: https://www.pexels.com/zh-cn/photo/4828489/
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/0818/test_草莓.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_fruits.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_orange_2.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_bananan.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_kiwi.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_石榴.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_orange.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_lemon.jpg -P test_img
# !wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_火龙果.jpg -P test_img
# Install torchcam
# Remove any existing torch-cam directory
!rm -rf torch-cam
# Clone and install torch-cam
!git clone https://github.com/frgfm/torch-cam.git
!pip install -e torch-cam/.
# Verify the installation
import torchcam
import matplotlib.pyplot as plt
%matplotlib inline
# Linux only, e.g. the cloud GPU platform: https://featurize.cn/?s=d7ce99f842414bfcaea5662a97581bd1
# If you see "Unable to establish SSL connection.", simply rerun this cell
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/SimHei.ttf -O /environment/miniconda3/lib/python3.7/site-packages/matplotlib/mpl-data/fonts/ttf/SimHei.ttf --no-check-certificate
!rm -rf /home/featurize/.cache/matplotlib
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
matplotlib.rc("font",family='SimHei') # 中文字体
plt.rcParams['axes.unicode_minus']=False # 用来正常显示负号
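A quick check (not part of the original steps) that matplotlib has actually picked up SimHei: the Chinese title below should render without missing-glyph warnings.
# Font sanity check: the Chinese title ("Chinese font test") should display correctly
plt.plot([1, 2, 3], [1, 4, 9])
plt.title('中文字体测试')
plt.show()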
torchcam interpretability analysis and visualization (command line): run various CAM-based analyses on an image
# Import packages
import os
import pandas as pd
from PIL import Image
!python torch-cam/scripts/cam_example.py --help
# ImageNet-pretrained image classification model
# ImageNet-1000 class names and IDs
df = pd.read_csv('imagenet_class_index.csv')
df
Visualizing the network's attention
# The image contains a single class
# Class: Border Collie
!python torch-cam/scripts/cam_example.py \
--img test_img/border-collie.jpg \
--savefig output/B1_border_collie.jpg \
--arch resnet18 \
--class-idx 232 \
--rows 2
Image.open('output/B1_border_collie.jpg')
# The image contains multiple classes
# Class: tiger cat
!python torch-cam/scripts/cam_example.py \
--img test_img/cat_dog.jpg \
--savefig output/B2_cat_dog.jpg \
--arch resnet18 \
--class-idx 282 \
--rows 2
Image.open('output/B2_cat_dog.jpg')
# Class: Border Collie
!python torch-cam/scripts/cam_example.py \
--img test_img/cat_dog.jpg \
--savefig output/B3_cat_dog.jpg \
--arch resnet18 \
--class-idx 232 \
--rows 2
Image.open('output/B3_cat_dog.jpg')
torchcam interpretability analysis and visualization (Python API): CAM-based analysis of a PyTorch ImageNet-1000 pretrained classification model
# Import packages
import matplotlib.pyplot as plt
%matplotlib inline
from PIL import Image
import torch
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)
# Chinese font
from PIL import ImageFont, ImageDraw
# Load the Chinese font at the given size
font = ImageFont.truetype('SimHei.ttf', 50)
# Load the ImageNet-pretrained model
from torchvision.models import resnet18
model = resnet18(pretrained=True).eval().to(device)
# Choose an interpretability method
from torchcam.methods import SmoothGradCAMpp
# CAM GradCAM GradCAMpp ISCAM LayerCAM SSCAM ScoreCAM SmoothGradCAMpp XGradCAM
cam_extractor = SmoothGradCAMpp(model)
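Any method from the comment above can be swapped in the same way. A minimal sketch, assuming the torchcam constructor accepts a target_layer argument given as a layer-name string (as in recent torchcam releases); the extractor below is only illustrative and is not used later, so its hooks are removed immediately.
# Hedged sketch: point a different CAM method at a specific layer (assumption: target_layer accepts a layer-name string)
from torchcam.methods import LayerCAM
layercam_extractor = LayerCAM(model, target_layer='layer2') # a shallower stage gives finer but noisier maps
layercam_extractor.remove_hooks() # free its hooks so they do not interfere with cam_extractor above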
# Preprocessing
from torchvision import transforms
# Test-set preprocessing (RCTN): Resize, CenterCrop, ToTensor, Normalize
test_transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# Run the image classification forward pass
img_path = 'test_img/cat_dog.jpg'
img_pil = Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
input_tensor.shape
pred_logits = model(input_tensor)
pred_top1 = torch.topk(pred_logits, 1)
pred_id = pred_top1[1].detach().cpu().numpy().squeeze().item()
pred_id
# Generate the interpretability heatmap
activation_map = cam_extractor(pred_id, pred_logits)
activation_map = activation_map[0][0].detach().cpu().numpy()
activation_map.shape
activation_map
# Visualize
plt.imshow(activation_map)
plt.show()
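The raw map above is coarse (typically 7x7 for ResNet-18 layer4 on a 224x224 crop). overlay_mask below upsamples and blends it internally; a hedged manual version of the same idea, using cv2.resize plus min-max normalization, looks like this:
# Manual upsampling and overlay of the raw CAM (overlay_mask does the equivalent internally)
import cv2
cam_resized = cv2.resize(activation_map, img_pil.size) # cv2 expects (width, height)
cam_norm = (cam_resized - cam_resized.min()) / (cam_resized.max() - cam_resized.min() + 1e-8)
plt.imshow(img_pil)
plt.imshow(cam_norm, cmap='jet', alpha=0.5) # semi-transparent heatmap on top of the image
plt.axis('off')
plt.show()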
from torchcam.utils import overlay_mask
result = overlay_mask(img_pil, Image.fromarray(activation_map), alpha=0.7)
result
import pandas as pd
df = pd.read_csv('imagenet_class_index.csv')
idx_to_labels = {}
idx_to_labels_cn = {}
for idx, row in df.iterrows():
    idx_to_labels[row['ID']] = row['class']
    idx_to_labels_cn[row['ID']] = row['Chinese']
idx_to_labels
img_path = 'test_img/cat_dog.jpg'
# Class ID to visualize; if None, the top-predicted class ID is used
show_class_id = 231
# show_class_id = None
# Whether to display class names in Chinese
Chinese = True
# Chinese = False
# Forward pass
img_pil = Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
pred_logits = model(input_tensor)
pred_top1 = torch.topk(pred_logits, 1)
pred_id = pred_top1[1].detach().cpu().numpy().squeeze().item()
# If no class ID is specified, fall back to the top-predicted class ID
if show_class_id:
    show_id = show_class_id
else:
    show_id = pred_id
    show_class_id = pred_id
# Generate the interpretability heatmap
activation_map = cam_extractor(show_id, pred_logits)
activation_map = activation_map[0][0].detach().cpu().numpy()
result = overlay_mask(img_pil, Image.fromarray(activation_map), alpha=0.7)
# Draw text on the image
draw = ImageDraw.Draw(result)
if Chinese:
    # write the Chinese class names on the image
    text_pred = 'Pred Class: {}'.format(idx_to_labels_cn[pred_id])
    text_show = 'Show Class: {}'.format(idx_to_labels_cn[show_class_id])
else:
    # write the English class names on the image
    text_pred = 'Pred Class: {}'.format(idx_to_labels[pred_id])
    text_show = 'Show Class: {}'.format(idx_to_labels[show_class_id])
# text position, string, font, RGBA colour
draw.text((50, 100), text_pred, font=font, fill=(255, 0, 0, 1))
draw.text((50, 200), text_show, font=font, fill=(255, 0, 0, 1))
result
Using the torchcam Python API to run CAM-based interpretability analysis on our own trained fruit image classification model (single input image)
# Import packages
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from PIL import Image
import torch
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)
# Load the trained PyTorch model
model = torch.load('checkpoint/fruit30_pytorch_20220814.pth', map_location=device)
model = model.eval().to(device)
# Load the class-name/index mapping dictionaries downloaded earlier
idx_to_labels = np.load('idx_to_labels.npy', allow_pickle=True).item()
labels_to_idx = np.load('labels_to_idx.npy', allow_pickle=True).item()
# Choose an interpretability method
from torchcam.methods import GradCAMpp
# CAM GradCAM GradCAMpp ISCAM LayerCAM SSCAM ScoreCAM SmoothGradCAMpp XGradCAM
cam_extractor = GradCAMpp(model)
# Preprocessing
from torchvision import transforms
# Test-set preprocessing (RCTN): Resize, CenterCrop, ToTensor, Normalize
test_transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# Run the image classification forward pass
img_path = 'test_img/test_fruits.jpg'
img_pil = Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
pred_logits = model(input_tensor)
pred_id = torch.topk(pred_logits, 1)[1].detach().cpu().numpy().squeeze().item()
pred_id
# Generate the interpretability heatmap
activation_map = cam_extractor(pred_id, pred_logits)
activation_map = activation_map[0][0].detach().cpu().numpy()
activation_map.shape
# Visualize
plt.imshow(activation_map)
plt.show()
from torchcam.utils import overlay_mask
result = overlay_mask(img_pil, Image.fromarray(activation_map), alpha=0.7)
result
# Full pipeline
img_path = 'test_img/test_fruits.jpg'
# Class to visualize; if not specified, the top-predicted class is used
show_class = '猕猴桃' # kiwifruit (the dictionary keys are the Chinese class names)
# Forward pass
img_pil = Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
pred_logits = model(input_tensor)
pred_id = torch.topk(pred_logits, 1)[1].detach().cpu().numpy().squeeze().item()
if show_class:
    class_id = labels_to_idx[show_class]
    show_id = class_id
else:
    show_id = pred_id
# Get the heatmap
activation_map = cam_extractor(show_id, pred_logits)
activation_map = activation_map[0][0].detach().cpu().numpy()
result = overlay_mask(img_pil, Image.fromarray(activation_map), alpha=0.4)
plt.imshow(result)
plt.axis('off')
plt.title('{}\nPred:{} Show:{}'.format(img_path, idx_to_labels[pred_id], show_class))
plt.show()
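The same pipeline can be looped over a whole folder. A minimal sketch, assuming the model, test_transform, cam_extractor and overlay_mask defined above are still in scope; it annotates every image found in test_img and writes the overlays to output/.
# Batch CAM overlays for all images in test_img (sketch)
import os
for fname in os.listdir('test_img'):
    if not fname.lower().endswith(('.jpg', '.jpeg', '.png')):
        continue # skip videos and other non-image files
    img_pil = Image.open(os.path.join('test_img', fname)).convert('RGB')
    input_tensor = test_transform(img_pil).unsqueeze(0).to(device)
    pred_logits = model(input_tensor)
    pred_id = torch.topk(pred_logits, 1)[1].item()
    activation_map = cam_extractor(pred_id, pred_logits)[0][0].detach().cpu().numpy()
    overlay = overlay_mask(img_pil, Image.fromarray(activation_map), alpha=0.4)
    overlay.save(os.path.join('output', 'cam_' + fname))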
2. The pytorch-grad-cam toolkit: CAM heatmaps, Guided Grad-CAM heatmaps, DFF
# Set up the environment
pip install grad-cam torchcam
# Clone pytorch-grad-cam
git clone https://github.com/jacobgil/pytorch-grad-cam.git
import os
# folder for test images
os.mkdir('test_img')
# folder for output files
os.mkdir('output')
# folder for model weights
os.mkdir('checkpoint')
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220919-explain/test_img/puppies.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220919-explain/test_img/bear.jpeg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220919-explain/test_img/box_tabby.png -P test_img
# Snake, source: https://www.pexels.com/zh-cn/photo/80474/
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220919-explain/test_img/snake.jpg -P test_img
# Giraffes and zebras, source: https://www.istockphoto.com/hk/%E7%85%A7%E7%89%87/giraffes-and-zebras-at-waterhole-gm503592172-82598465
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220919-explain/test_img/giraffe_zebra.jpg -P test_img
# Elephant, lion and antelope, source: https://www.istockphoto.com/hk/%E7%85%A7%E7%89%87/%E5%A4%A7%E8%B1%A1%E5%92%8C%E7%8D%85%E5%AD%90-gm1136053333-30244130
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220919-explain/test_img/africa.jpg -P test_img
# Border Collie, source: https://www.woopets.fr/assets/races/000/066/big-portrait/border-collie.jpg
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/border-collie.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/cat_dog.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_fruits.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_orange_2.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_bananan.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_kiwi.jpg -P test_img
# Strawberry image, source: https://www.pexels.com/zh-cn/photo/4828489/
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/0818/test_草莓.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_石榴.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_orange.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_lemon.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_火龙果.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/watermelon1.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/banana1.jpg -P test_img
# Download the ImageNet-1000 class metadata
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/meta_data/imagenet_class_index.csv
# Model files
# Download the sample model weights
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/checkpoints/fruit30_pytorch_20220814.pth -P checkpoint
# Download the mapping dictionaries between class names and index IDs
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/fruit30/labels_to_idx.npy
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/fruit30/idx_to_labels.npy
import pytorch_grad_cam
Grad-CAM heatmap analysis of a single image
# Import packages
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from torchvision.models import resnet50
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
import torch
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)
# Load the ImageNet-pretrained image classification model
model = resnet50(pretrained=True).eval().to(device)
# Image preprocessing
from torchvision import transforms
# Test-set preprocessing (RCTN): Resize, CenterCrop, ToTensor, Normalize
test_transform = transforms.Compose([transforms.Resize(512),
# transforms.CenterCrop(512),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# Load the test image
img_path = 'test_img/cat_dog.jpg'
img_pil=Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
input_tensor.shape
# Specify the class to analyse
# 281: tabby cat, 232: Border Collie
# If targets is None, the top-predicted class is used
targets = [ClassifierOutputTarget(232)]
# Inspect the model structure to pick the layer(s) to analyse
model
model.layer4[-1]
model.layer1[0]
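If the printed structure is hard to scan, the candidate layers can also be listed programmatically; the small sketch below prints every convolutional layer by name so a target layer can be picked without scrolling through the full module tree.
# List named convolutional layers to help choose target_layers
import torch.nn as nn
for name, module in model.named_modules():
    if isinstance(module, nn.Conv2d):
        print(name, module)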
# Pick any of the interpretability methods
from pytorch_grad_cam import GradCAM, HiResCAM, GradCAMElementWise, GradCAMPlusPlus, XGradCAM, AblationCAM, ScoreCAM, EigenCAM, EigenGradCAM, LayerCAM, FullGrad
# Grad-CAM
from pytorch_grad_cam import GradCAM
target_layers = [model.layer4[-1]]
cam = GradCAM(model=model, target_layers=target_layers, use_cuda=True)
# # Grad-CAM++
# from pytorch_grad_cam import GradCAMPlusPlus
# target_layers = [model.layer4[-1]]
# cam = GradCAMPlusPlus(model=model, target_layers=target_layers, use_cuda=True)
# Generate the CAM heatmap
cam_map = cam(input_tensor=input_tensor, targets=targets)[0] # without smoothing
# cam_map = cam(input_tensor=input_tensor, targets=targets, aug_smooth=True, eigen_smooth=True)[0] # with smoothing
# Visualize
cam_map.shape
plt.imshow(cam_map)
plt.show()
import torchcam
from torchcam.utils import overlay_mask
result = overlay_mask(img_pil, Image.fromarray(cam_map), alpha=0.6) # smaller alpha makes the original image fainter
result
result.save('output/B1.jpg')
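Since the other CAM variants imported above share the same interface, they can be compared on one input in a single figure. A hedged sketch (ScoreCAM and AblationCAM are left out because they are much slower; use_cuda mirrors the call above but newer grad-cam releases may expect a device argument instead):
# Compare several gradient-based CAM variants side by side (sketch)
methods = {'GradCAM': GradCAM, 'GradCAM++': GradCAMPlusPlus, 'XGradCAM': XGradCAM, 'LayerCAM': LayerCAM}
fig, axes = plt.subplots(1, len(methods), figsize=(4 * len(methods), 4))
for ax, (name, method) in zip(axes, methods.items()):
    cam_method = method(model=model, target_layers=target_layers, use_cuda=True)
    heatmap = cam_method(input_tensor=input_tensor, targets=targets)[0]
    ax.imshow(overlay_mask(img_pil, Image.fromarray(heatmap), alpha=0.6))
    ax.set_title(name)
    ax.axis('off')
plt.show()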
LayerCAM analysis of a single image
# Import packages
from torchvision.models import vgg16, resnet50
import numpy as np
import pandas as pd
import cv2
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
import torch
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)
# Load the ImageNet-pretrained image classification model
model = vgg16(pretrained=True).eval().to(device)
# model = resnet50(pretrained=True).eval().to(device)
# Preprocessing
from torchvision import transforms
# Test-set preprocessing (RCTN): Resize, CenterCrop, ToTensor, Normalize
test_transform = transforms.Compose([transforms.Resize(224),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
img_path = 'test_img/snake.jpg'
# img_path = 'test_img/cat_dog.jpg'
img_pil = Image.open(img_path)
# img_pil
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
input_tensor.shape
# Feed the model and run the forward pass
# Forward pass to obtain the logit scores for all classes
pred_logits = model(input_tensor)
import torch.nn.functional as F
pred_softmax = F.softmax(pred_logits, dim=1) # softmax over the logits
pred_softmax.shape
# Get the classification results
n = 5
top_n = torch.topk(pred_softmax, n)
top_n
# extract the class IDs
pred_ids = top_n[1].cpu().detach().numpy().squeeze()
pred_ids
# extract the confidences
confs = top_n[0].cpu().detach().numpy().squeeze()
confs
# Load the ImageNet-1000 classification labels
df = pd.read_csv('imagenet_class_index.csv')
idx_to_labels = {}
for idx, row in df.iterrows():
    idx_to_labels[row['ID']] = [row['wordnet'], row['class']]
for i in range(n):
    class_name = idx_to_labels[pred_ids[i]][1] # class name
    confidence = confs[i] * 100 # confidence in percent
    text = '{:<5} {:<15} {:>.4f}'.format(pred_ids[i], class_name, confidence)
    print(text)
# Specify the class to analyse
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
# If targets is None, the top-predicted class is used
targets = [ClassifierOutputTarget(56)]
# Inspect the model structure to pick the layer to analyse
# model
# Choose an interpretability method
# LayerCAM
from pytorch_grad_cam import LayerCAM
target_layers = [model.features[8]] # vgg16
# target_layers = [model.layer3[0]] # resnet50
cam = LayerCAM(model=model, target_layers=target_layers, use_cuda=True)
# Generate the CAM heatmap
cam_map = cam(input_tensor=input_tensor, targets=targets)[0] # without smoothing
# Visualize the CAM heatmap
cam_map.shape
cam_map.dtype
plt.imshow(cam_map)
plt.show()
import torchcam
from torchcam.utils import overlay_mask
result = overlay_mask(img_pil, Image.fromarray(cam_map), alpha=0.12) # smaller alpha makes the original image fainter
# result
result.save('output/B2.jpg')
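LayerCAM is designed to give usable maps from shallow layers as well as deep ones. A hedged sketch comparing a few VGG-16 feature stages on the same input (the layer indices are illustrative, not prescribed by the original notebook):
# Compare LayerCAM at several VGG-16 depths (sketch; layer indices are illustrative)
layer_ids = [4, 8, 16, 29] # shallow to deep indices into model.features
fig, axes = plt.subplots(1, len(layer_ids), figsize=(4 * len(layer_ids), 4))
for ax, layer_id in zip(axes, layer_ids):
    cam_layer = LayerCAM(model=model, target_layers=[model.features[layer_id]], use_cuda=True)
    heatmap = cam_layer(input_tensor=input_tensor, targets=targets)[0]
    ax.imshow(overlay_mask(img_pil, Image.fromarray(heatmap), alpha=0.4))
    ax.set_title('features[{}]'.format(layer_id))
    ax.axis('off')
plt.show()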
Guided Grad-CAM analysis of a single image: a fine-grained heatmap that is both class-discriminative and high-resolution
# Import packages
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
import torch
from torchvision import models
from pytorch_grad_cam import GradCAM, HiResCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, EigenGradCAM, LayerCAM, FullGrad, GradCAMElementWise
from pytorch_grad_cam import GuidedBackpropReLUModel
from pytorch_grad_cam.utils.image import show_cam_on_image, deprocess_image, preprocess_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)
model = models.resnet50(pretrained=True).eval().to(device)
from torchvision import transforms
# Test-set preprocessing (RCTN): Resize, CenterCrop, ToTensor, Normalize
test_transform = transforms.Compose([transforms.Resize(224),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# Load the test image
img_path = 'test_img/cat_dog.jpg'
img_pil = Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
input_tensor.shape
# Choose an interpretability method
# GradCAM
from pytorch_grad_cam import GradCAM
target_layers = [model.layer4[-1]] # layer to analyse
targets = [ClassifierOutputTarget(232)] # class to analyse
cam = GradCAM(model=model, target_layers=target_layers, use_cuda=True)
# Generate the Grad-CAM heatmap
cam_map = cam(input_tensor=input_tensor, targets=targets)[0] # without smoothing
# cam_map = cam(input_tensor=input_tensor, targets=targets, aug_smooth=True, eigen_smooth=True)[0] # with smoothing
cam_map.shape
plt.imshow(cam_map)
plt.title('Grad-CAM')
plt.show()
import torchcam
from torchcam.utils import overlay_mask
result = overlay_mask(img_pil, Image.fromarray(cam_map), alpha=0.5) # smaller alpha makes the original image fainter
plt.imshow(result)
plt.title('Grad-CAM')
plt.show()
# Guided Backpropagation
# Initialize the algorithm
gb_model = GuidedBackpropReLUModel(model=model, use_cuda=True)
# Generate the Guided Backpropagation heatmap
gb_origin = gb_model(input_tensor, target_category=None)
gb_show = deprocess_image(gb_origin)
gb_show.shape
plt.imshow(gb_show)
plt.title('Guided Backpropagation')
plt.show()
# Multiply the Grad-CAM heatmap element-wise with the Guided Backpropagation heatmap
# Three-channel Grad-CAM heatmap
cam_mask = cv2.merge([cam_map, cam_map, cam_map])
cam_mask.shape
# element-wise multiplication
guided_gradcam = deprocess_image(cam_mask * gb_origin)
guided_gradcam.shape
plt.imshow(guided_gradcam)
plt.title('Guided Grad-CAM')
plt.show()
cv2.imwrite('output/C1_guided_gradcam.jpg', guided_gradcam)
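To compare the three results directly, the arrays computed above can be placed in one figure:
# Show Grad-CAM, Guided Backpropagation and Guided Grad-CAM side by side
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
panels = [('Grad-CAM', result), ('Guided Backpropagation', gb_show), ('Guided Grad-CAM', guided_gradcam)]
for ax, (title, image) in zip(axes, panels):
    ax.imshow(image)
    ax.set_title(title)
    ax.axis('off')
plt.show()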
Guided Grad-CAM analysis of a single image with the self-trained fruit-30 model: a fine-grained heatmap that is both class-discriminative and high-resolution
# Import packages
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
import torch
from torchvision import models
from pytorch_grad_cam import GradCAM, HiResCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, EigenGradCAM, LayerCAM, FullGrad, GradCAMElementWise
from pytorch_grad_cam import GuidedBackpropReLUModel
from pytorch_grad_cam.utils.image import show_cam_on_image, deprocess_image, preprocess_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)
# Load the model
model = torch.load('checkpoint/fruit30_pytorch_20220814.pth', map_location=device)
model = model.eval().to(device)
idx_to_labels_cn = np.load('idx_to_labels.npy', allow_pickle=True).item()
idx_to_labels_cn
# Image preprocessing
from torchvision import transforms
# Test-set preprocessing (RCTN): Resize, CenterCrop, ToTensor, Normalize
test_transform = transforms.Compose([transforms.Resize(224),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# Load the test image
img_path = 'test_img/test_fruits.jpg'
img_pil = Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device) # preprocessing
input_tensor.shape
# Choose an interpretability method
# GradCAM
from pytorch_grad_cam import GradCAM
target_layers = [model.layer4[-1]] # layer to analyse
targets = [ClassifierOutputTarget(28)] # class to analyse (28 = banana)
cam = GradCAM(model=model, target_layers=target_layers, use_cuda=True)
# Generate the Grad-CAM heatmap
cam_map = cam(input_tensor=input_tensor, targets=targets)[0] # without smoothing
# cam_map = cam(input_tensor=input_tensor, targets=targets, aug_smooth=True, eigen_smooth=True)[0] # with smoothing
cam_map.shape
plt.imshow(cam_map)
plt.title('Grad-CAM')
plt.show()
import torchcam
from torchcam.utils import overlay_mask
result = overlay_mask(img_pil, Image.fromarray(cam_map), alpha=0.5) # smaller alpha makes the original image fainter
plt.imshow(result)
plt.title('Grad-CAM')
plt.show()
# Guided Backpropagation
# Initialize the algorithm
gb_model = GuidedBackpropReLUModel(model=model, use_cuda=True)
# Generate the Guided Backpropagation heatmap
gb_origin = gb_model(input_tensor, target_category=None)
gb_show = deprocess_image(gb_origin)
gb_show.shape
plt.imshow(gb_show)
plt.title('Guided Backpropagation')
plt.show()
# Multiply the two heatmaps element-wise
# Three-channel Grad-CAM heatmap
cam_mask = cv2.merge([cam_map, cam_map, cam_map])
cam_mask.shape
# element-wise multiplication
guided_gradcam = deprocess_image(cam_mask * gb_origin)
guided_gradcam.shape
plt.imshow(guided_gradcam)
plt.title('Guided Grad-CAM')
plt.show()
cv2.imwrite('output/C2_guided_gradcam.jpg', guided_gradcam)
Deep Feature Factorization (DFF) analysis of a single image: concept discovery visualization
# Import packages
import warnings
warnings.filterwarnings('ignore')
import requests
from PIL import Image
import numpy as np
import pandas as pd
import cv2
import json
import matplotlib.pyplot as plt
%matplotlib inline
from pytorch_grad_cam import DeepFeatureFactorization
from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image, deprocess_image
from pytorch_grad_cam import GradCAM
from torchvision.models import resnet50
import torch
# Preprocessing helpers
def get_image_from_path(img_path):
    '''
    Given an image file path, return the raw image array, the normalized image array,
    and the preprocessed input tensor.
    '''
    img = np.array(Image.open(img_path))
    rgb_img_float = np.float32(img) / 255
    input_tensor = preprocess_image(rgb_img_float,
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
    return img, rgb_img_float, input_tensor

def create_labels(concept_scores, top_k=2):
    """Create a list with the ImageNet category names of the top scoring categories."""
    df = pd.read_csv('imagenet_class_index.csv')
    labels = {}
    for idx, row in df.iterrows():
        labels[row['ID']] = row['class']
    concept_categories = np.argsort(concept_scores, axis=1)[:, ::-1][:, :top_k]
    concept_labels_topk = []
    for concept_index in range(concept_categories.shape[0]):
        categories = concept_categories[concept_index, :]
        concept_labels = []
        for category in categories:
            score = concept_scores[concept_index, category]
            label = f"{labels[category].split(',')[0]}:{score:.2f}"
            concept_labels.append(label)
        concept_labels_topk.append("\n".join(concept_labels))
    return concept_labels_topk
# Load the model
model = resnet50(pretrained=True).eval()
# Load the test image
img_path = 'test_img/cat_dog.jpg'
# Preprocess
img, rgb_img_float, input_tensor = get_image_from_path(img_path)
img.shape
input_tensor.shape
# Initialize the DFF algorithm
classifier = model.fc
dff = DeepFeatureFactorization(model=model,
                               target_layer=model.layer4,
                               computation_on_concepts=classifier)
# number of concepts (number of colours in the overlay)
n_components = 5
concepts, batch_explanations, concept_outputs = dff(input_tensor, n_components)
concepts.shape
# Per-pixel concept heatmaps
# n_concepts x height x width
batch_explanations[0].shape
plt.imshow(batch_explanations[0][4])
plt.show()
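Each channel of batch_explanations[0] is one concept; a quick sketch to view all n_components of them in a single row:
# Plot every DFF concept heatmap in one row
fig, axes = plt.subplots(1, n_components, figsize=(4 * n_components, 4))
for i, ax in enumerate(axes):
    ax.imshow(batch_explanations[0][i])
    ax.set_title('concept {}'.format(i))
    ax.axis('off')
plt.show()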
# Relation between concepts and classes
concept_outputs.shape
concept_outputs = torch.softmax(torch.from_numpy(concept_outputs), axis=-1).numpy()
concept_outputs.shape
# Show the top_k classes for each concept
# how many classes to show per concept
top_k = 2
concept_label_strings = create_labels(concept_outputs, top_k=top_k)
concept_label_strings
# Generate the visualization
from pytorch_grad_cam.utils.image import show_factorization_on_image
visualization = show_factorization_on_image(rgb_img_float,
                                            batch_explanations[0],
                                            image_weight=0.3, # opacity of the original image
                                            concept_labels=concept_label_strings)
result = np.hstack((img, visualization))
Image.fromarray(result)
# Wrap everything into a helper function
def dff_show(img_path='test_img/cat_dog.jpg', n_components=5, top_k=2, hstack=False):
    img, rgb_img_float, input_tensor = get_image_from_path(img_path)
    dff = DeepFeatureFactorization(model=model,
                                   target_layer=model.layer4,
                                   computation_on_concepts=classifier)
    concepts, batch_explanations, concept_outputs = dff(input_tensor, n_components)
    concept_outputs = torch.softmax(torch.from_numpy(concept_outputs), axis=-1).numpy()
    concept_label_strings = create_labels(concept_outputs, top_k=top_k)
    visualization = show_factorization_on_image(rgb_img_float,
                                                batch_explanations[0],
                                                image_weight=0.3, # opacity of the original image
                                                concept_labels=concept_label_strings)
    if hstack:
        result = np.hstack((img, visualization))
    else:
        result = visualization
    display(Image.fromarray(result))
dff_show()
dff_show(hstack=True)
dff_show(img_path='test_img/box_tabby.png', hstack=True)
dff_show(img_path='test_img/puppies.jpg', hstack=True)
dff_show(img_path='test_img/bear.jpeg', hstack=True)
dff_show(img_path='test_img/bear.jpeg', n_components=10, top_k=1, hstack=True)
dff_show(img_path='test_img/giraffe_zebra.jpg', n_components=5, top_k=2, hstack=True)
DFF analysis of the self-trained fruit-30 image classification model
# Import packages
import warnings
warnings.filterwarnings('ignore')
import requests
from PIL import Image
import numpy as np
import pandas as pd
import cv2
import json
import matplotlib.pyplot as plt
%matplotlib inline
from pytorch_grad_cam import DeepFeatureFactorization
from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image, deprocess_image
from pytorch_grad_cam import GradCAM
from torchvision.models import resnet50
import torch
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device = 'cpu' # force CPU here
print('device', device)
# Preprocessing helpers
from torchvision import transforms
# Test-set preprocessing (RCTN): Resize, CenterCrop, ToTensor, Normalize
test_transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def get_image_from_path(img_path):
    '''
    Given an image file path, return the raw image array, the normalized image array,
    and the preprocessed input tensor.
    '''
    img = np.array(Image.open(img_path))
    rgb_img_float = np.float32(img) / 255
    input_tensor = preprocess_image(rgb_img_float,
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
    return img, rgb_img_float, input_tensor
def create_labels(concept_scores, top_k=2):
    """Create a list with the fruit-30 category names of the top scoring categories."""
    labels = {
        0: 'Hami Melon',
        1: 'Cherry Tomatoes',
        2: 'Shanzhu',
        3: 'Bayberry',
        4: 'Grapefruit',
        5: 'Lemon',
        6: 'Longan',
        7: 'Pears',
        8: 'Coconut',
        9: 'Durian',
        10: 'Dragon Fruit',
        11: 'Kiwi',
        12: 'Pomegranate',
        13: 'Sugar orange',
        14: 'Carrots',
        15: 'Navel orange',
        16: 'Mango',
        17: 'Balsam pear',
        18: 'Apple Red',
        19: 'Apple Green',
        20: 'Strawberries',
        21: 'Litchi',
        22: 'Pineapple',
        23: 'Grape White',
        24: 'Grape Red',
        25: 'Watermelon',
        26: 'Tomato',
        27: 'Cherries',
        28: 'Banana',
        29: 'Cucumber'
    }
    concept_categories = np.argsort(concept_scores, axis=1)[:, ::-1][:, :top_k]
    concept_labels_topk = []
    for concept_index in range(concept_categories.shape[0]):
        categories = concept_categories[concept_index, :]
        concept_labels = []
        for category in categories:
            score = concept_scores[concept_index, category]
            label = f"{labels[category].split(',')[0]}:{score:.2f}"
            concept_labels.append(label)
        concept_labels_topk.append("\n".join(concept_labels))
    return concept_labels_topk
# Load the model
model = torch.load('checkpoint/fruit30_pytorch_20220814.pth', map_location=device)
model = model.eval().to(device)
# Load the test image
img_path = 'test_img/test_fruits.jpg'
img_pil = Image.open(img_path)
input_tensor = test_transform(img_pil).unsqueeze(0).to(device)
input_tensor.shape
# Preprocess
img, rgb_img_float, input_tensor = get_image_from_path(img_path)
img.shape
input_tensor.shape
# Initialize the DFF algorithm
classifier = model.fc
dff = DeepFeatureFactorization(model=model,
                               target_layer=model.layer4,
                               computation_on_concepts=classifier)
# number of concepts (number of colours in the overlay)
n_components = 5
concepts, batch_explanations, concept_outputs = dff(input_tensor, n_components)
concepts.shape
# Per-pixel concept heatmaps
# n_concepts x height x width
batch_explanations[0].shape
plt.imshow(batch_explanations[0][2])
plt.show()
# Relation between concepts and classes
concept_outputs.shape
concept_outputs = torch.softmax(torch.from_numpy(concept_outputs), axis=-1).numpy()
concept_outputs.shape
# Show the top_k classes for each concept
# how many classes to show per concept
top_k = 2
concept_label_strings = create_labels(concept_outputs, top_k=top_k)
concept_label_strings
# Generate the visualization
from pytorch_grad_cam.utils.image import show_factorization_on_image
visualization = show_factorization_on_image(rgb_img_float,
                                            batch_explanations[0],
                                            image_weight=0.3, # opacity of the original image
                                            concept_labels=concept_label_strings)
result = np.hstack((img, visualization))
Image.fromarray(result)
# Wrap everything into a helper function
def dff_show(img_path='test_img/cat_dog.jpg', n_components=5, top_k=2, hstack=False):
    img, rgb_img_float, input_tensor = get_image_from_path(img_path)
    dff = DeepFeatureFactorization(model=model,
                                   target_layer=model.layer4,
                                   computation_on_concepts=classifier)
    concepts, batch_explanations, concept_outputs = dff(input_tensor, n_components)
    concept_outputs = torch.softmax(torch.from_numpy(concept_outputs), axis=-1).numpy()
    concept_label_strings = create_labels(concept_outputs, top_k=top_k)
    visualization = show_factorization_on_image(rgb_img_float,
                                                batch_explanations[0],
                                                image_weight=0.3, # opacity of the original image
                                                concept_labels=concept_label_strings)
    if hstack:
        result = np.hstack((img, visualization))
    else:
        result = visualization
    display(Image.fromarray(result))
dff_show(img_path='test_img/test_草莓.jpg', hstack=True)
dff_show(img_path='test_img/test_火龙果.jpg', hstack=True)
dff_show(img_path='test_img/test_石榴.jpg', hstack=True)
dff_show(img_path='test_img/test_bananan.jpg', hstack=True)
dff_show(img_path='test_img/test_kiwi.jpg', hstack=True)
3. The Captum toolkit: occlusion, gradients
4. The shap toolkit
# Set up the environment
pip install numpy pandas matplotlib requests tqdm opencv-python pillow shap tensorflow keras -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
import shap
import os
# folder for test images
os.mkdir('test_img')
# folder for output files
os.mkdir('output')
# folder for trained model weights
os.mkdir('checkpoint')
# folder for annotation/metadata files
os.mkdir('data')
# Download the ImageNet-1000 class metadata
wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/meta_data/imagenet_class_index.csv -P data
# Download the sample model weights
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/checkpoints/fruit30_pytorch_20220814.pth -P checkpoint
# Download the mapping dictionaries between class names and index IDs
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/fruit30/labels_to_idx.npy -P data
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/fruit30/idx_to_labels.npy -P data
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220919-explain/imagenet_class_index.json -P data
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/dataset/fruit30/idx_to_labels_en.npy -P data
# Download test images into the test_img folder
# Border Collie, source: https://www.woopets.fr/assets/races/000/066/big-portrait/border-collie.jpg
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/border-collie.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/cat_dog.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/0818/room_video.mp4 -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/swan-3299528_1280.jpg -P test_img
# Strawberry image, source: https://www.pexels.com/zh-cn/photo/4828489/
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/0818/test_草莓.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_fruits.jpg -P test_img
!wget https://zihao-openmmlab.obs.myhuaweicloud.com/20220716-mmclassification/test/0818/test_orange_2.jpg -P test_img
!wget https://zihao-openmmlab.obs.cn-east-3.myhuaweicloud.com/20220716-mmclassification/test/banana-kiwi.png -P test_img
Interpretability analysis of our own trained 30-class fruit image classification model: visualize SHAP value heatmaps for specified prediction classes
# Import packages
import json
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms
import shap
# Use GPU if available, otherwise fall back to CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)
# Load the 30-class fruit image classification model
model = torch.load('checkpoint/fruit30_pytorch_20220814.pth', map_location=device)
model = model.eval().to(device)
# Load the class names of the dataset
idx_to_labels = np.load('data/idx_to_labels_en.npy', allow_pickle=True).item()
idx_to_labels
class_names = list(idx_to_labels.values())
class_names
# Load a test image and arrange its dimensions
# img_path = 'test_img/test_草莓.jpg'
img_path = 'test_img/test_fruits.jpg'
img_pil = Image.open(img_path)
X = torch.Tensor(np.array(img_pil)).unsqueeze(0)
X.shape
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
def nhwc_to_nchw(x: torch.Tensor) -> torch.Tensor:
    if x.dim() == 4:
        x = x if x.shape[1] == 3 else x.permute(0, 3, 1, 2)
    elif x.dim() == 3:
        x = x if x.shape[0] == 3 else x.permute(2, 0, 1)
    return x

def nchw_to_nhwc(x: torch.Tensor) -> torch.Tensor:
    if x.dim() == 4:
        x = x if x.shape[3] == 3 else x.permute(0, 2, 3, 1)
    elif x.dim() == 3:
        x = x if x.shape[2] == 3 else x.permute(1, 2, 0)
    return x
transform= [
transforms.Lambda(nhwc_to_nchw),
transforms.Resize(224),
transforms.Lambda(lambda x: x*(1/255)),
transforms.Normalize(mean=mean, std=std),
transforms.Lambda(nchw_to_nhwc),
]
inv_transform= [
transforms.Lambda(nhwc_to_nchw),
transforms.Normalize(
mean = (-1 * np.array(mean) / np.array(std)).tolist(),
std = (1 / np.array(std)).tolist()
),
transforms.Lambda(nchw_to_nhwc),
]
transform = torchvision.transforms.Compose(transform)
inv_transform = torchvision.transforms.Compose(inv_transform)
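A quick hedged round-trip check that inv_transform undoes transform (the spatial size changes because of Resize(224), but the recovered values should sit in roughly [0, 1]); this matters because the SHAP plots below are drawn on inv_transform-ed data.
# Round-trip sanity check for transform / inv_transform
x_check = inv_transform(transform(X))
print('original:', X.shape, 'round-trip:', x_check.shape) # Resize(224) changes the spatial size
print('value range after round-trip:', x_check.min().item(), x_check.max().item()) # expected to be roughly [0, 1]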
# Build the model prediction function
def predict(img: np.ndarray) -> torch.Tensor:
    img = nhwc_to_nchw(torch.Tensor(img)).to(device)
    output = model(img)
    return output
# Sanity-check that the whole pipeline works
Xtr = transform(X)
out = predict(Xtr[0:1])
out.shape
classes = torch.argmax(out, axis=1).detach().cpu().numpy()
print(f'Classes: {classes}: {np.array(class_names)[classes]}')
# Set up the SHAP explainer
# Build the input image
input_img = Xtr[0].unsqueeze(0)
input_img.shape
batch_size = 50
n_evals = 5000 # more evaluations give finer-grained attributions but take longer to compute
# Define the masker that blurs local regions of the input image
masker_blur = shap.maskers.Image("blur(64, 64)", Xtr[0].shape)
# Create the explainer
explainer = shap.Explainer(predict, masker_blur, output_names=class_names)
# Specify a single prediction class
# 28: banana
shap_values = explainer(input_img, max_evals=n_evals, batch_size=batch_size, outputs=[28])
# Rearrange tensor dimensions
shap_values.data = inv_transform(shap_values.data).cpu().numpy()[0] # original image
shap_values.values = [val for val in np.moveaxis(shap_values.values[0],-1, 0)] # SHAP value heatmaps
# original image
shap_values.data.shape
# SHAP value heatmaps
shap_values.values[0].shape
# Visualize
shap.image_plot(shap_values=shap_values.values,
pixel_values=shap_values.data,
labels=shap_values.output_names)
# Specify multiple prediction classes
# 5: lemon
# 12: pomegranate
# 15: navel orange
shap_values = explainer(input_img, max_evals=n_evals, batch_size=batch_size, outputs=[5, 12, 15])
# Rearrange tensor dimensions
shap_values.data = inv_transform(shap_values.data).cpu().numpy()[0] # original image
shap_values.values = [val for val in np.moveaxis(shap_values.values[0],-1, 0)] # SHAP value heatmaps
# SHAP heatmaps: the SHAP value of every pixel for every class
shap_values.shape
# Visualize
shap.image_plot(shap_values=shap_values.values,
pixel_values=shap_values.data,
labels=shap_values.output_names)
# Top-k predicted classes
topk = 5
shap_values = explainer(input_img, max_evals=n_evals, batch_size=batch_size, outputs=shap.Explanation.argsort.flip[:topk])
# SHAP heatmaps: the SHAP value of every pixel for every class
shap_values.shape
# Rearrange tensor dimensions
shap_values.data = inv_transform(shap_values.data).cpu().numpy()[0] # original image
shap_values.values = [val for val in np.moveaxis(shap_values.values[0],-1, 0)] # per-class SHAP value heatmaps
# Per-class SHAP value heatmaps
len(shap_values.values)
# SHAP heatmap for the first class
shap_values.values[0].shape
# Visualize
shap.image_plot(shap_values=shap_values.values,
pixel_values=shap_values.data,
labels=shap_values.output_names
)
Using shap's GradientExplainer to compute SHAP values for an intermediate-layer output of a pretrained VGG16 model
# Import packages
import torch, torchvision
from torch import nn
from torchvision import transforms, models, datasets
import shap
import json
import numpy as np
# Load the model
model = models.vgg16(pretrained=True).eval()
# Load the dataset and define preprocessing
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
def normalize(image):
    if image.max() > 1:
        image /= 255
    image = (image - mean) / std
    # in addition, roll the axes so that they suit pytorch
    return torch.tensor(image.swapaxes(-1, 1).swapaxes(2, 3)).float()
# Pick the test images
X, y = shap.datasets.imagenet50()
X /= 255
to_explain = X[[39, 41]]
# Load the class names and index IDs
url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
fname = shap.datasets.cache(url)
with open(fname) as f:
    class_names = json.load(f)
# Compute SHAP values of an intermediate layer with respect to the input images
# pick the intermediate layer
layer_index = 7
# number of samples; 200 takes roughly 5 minutes
samples = 200
e = shap.GradientExplainer((model, model.features[layer_index]), normalize(X))
shap_values,indexes = e.shap_values(normalize(to_explain), ranked_outputs=2, nsamples=samples)
# Names of the predicted classes
index_names = np.vectorize(lambda x: class_names[str(x)][1])(indexes)
index_names
# Visualize
shap_values = [np.swapaxes(np.swapaxes(s, 2, 3), 1, -1) for s in shap_values]
shap.image_plot(shap_values, to_explain, index_names)
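The choice of layer_index controls how fine-grained the attributions are. A hedged sketch that repeats the computation for a shallower and a deeper layer (the indices are illustrative, and each extra layer roughly doubles the runtime):
# Compare SHAP attributions against different VGG16 feature layers (sketch)
for layer_idx in [7, 21]: # illustrative indices into model.features
    e_layer = shap.GradientExplainer((model, model.features[layer_idx]), normalize(X))
    sv, idx = e_layer.shap_values(normalize(to_explain), ranked_outputs=2, nsamples=samples)
    names = np.vectorize(lambda x: class_names[str(x)][1])(idx)
    sv = [np.swapaxes(np.swapaxes(s, 2, 3), 1, -1) for s in sv]
    shap.image_plot(sv, to_explain, names)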
# Add local smoothing to the input images
# Compute SHAP values of the intermediate layer with respect to the input images
explainer = shap.GradientExplainer((model, model.features[layer_index]), normalize(X), local_smoothing=0.5)
shap_values, indexes = explainer.shap_values(normalize(to_explain), ranked_outputs=2, nsamples=samples)
# Names of the predicted classes
index_names = np.vectorize(lambda x: class_names[str(x)][1])(indexes)
# Visualize
shap_values = [np.swapaxes(np.swapaxes(s, 2, 3), 1, -1) for s in shap_values]
shap.image_plot(shap_values, to_explain, index_names)
Interpretability analysis of a ResNet50 image classification model by locally occluding the input image
# Import packages
import json
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
import shap
# Load the pretrained model
model = ResNet50(weights='imagenet')
# Load the dataset
X, y = shap.datasets.imagenet50()
# Build the model prediction function
def f(x):
    tmp = x.copy()
    preprocess_input(tmp)
    return model(tmp)
# Build the local-occlusion masker
masker = shap.maskers.Image("inpaint_telea", X[0].shape)
# Output class names
url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
with open(shap.datasets.cache(url)) as file:
    class_names = [v[1] for v in json.load(file).values()]
# Create the Explainer
explainer = shap.Explainer(f, masker, output_names=class_names)
# Compute SHAP values
shap_values = explainer(X[1:3], max_evals=100, batch_size=50, outputs=shap.Explanation.argsort.flip[:4])
# Visualize
shap.image_plot(shap_values)
# Finer-grained SHAP computation and visualization
masker_blur = shap.maskers.Image("blur(128,128)", X[0].shape)
explainer_blur = shap.Explainer(f, masker_blur, output_names=class_names)
shap_values_fine = explainer_blur(X[1:3], max_evals=5000, batch_size=50, outputs=shap.Explanation.argsort.flip[:4])
shap.image_plot(shap_values_fine)