-1. Updated April 2025
With GPT's help this could be implemented in one go; GPT's result is shown below.
The output files can be plotted directly with pheatmap.
python interact_density_to_matrix.py \
-bin_num 100 \
-row 10 \
-length chrom_length.txt \
-bed tijian.bed \
-ow whole_genome.heatmap.txt \
-op per_chr.heatmap.txt
cat interact_density_to_matrix.py
#!/usr/bin/env python3
import argparse
import pandas as pd
import numpy as np

# Parse command-line arguments
parser = argparse.ArgumentParser(description="Generate interchromosomal anchor density matrices (whole genome and per chromosome).")
parser.add_argument("-bin_num", type=int, default=100, help="Number of bins to divide each chromosome (default: 100)")
parser.add_argument("-row", type=int, default=10, help="Number of rows to repeat the whole-genome density vector (default: 10)")
parser.add_argument("-length", required=True, help="Chromosome length file (tsv: chrom\\tlength)")
parser.add_argument("-bed", required=True, help="Interchromosomal interaction bed file")
parser.add_argument("-ow", required=True, help="Output file for whole-genome heatmap")
parser.add_argument("-op", required=True, help="Output file for per-chromosome heatmap")
args = parser.parse_args()

# Read arguments
bin_number = args.bin_num
repeat_rows = args.row
chrom_len_file = args.length
bed_file = args.bed
out_whole = args.ow
out_perchr = args.op

# Read chromosome lengths
chrom_len = pd.read_csv(chrom_len_file, sep="\t", header=None, names=["chrom", "length"])
chrom_len_dict = dict(zip(chrom_len["chrom"], chrom_len["length"]))
chrom_list = list(chrom_len["chrom"])  # keep the input order!

# Read the interchromosomal interaction bed file
df = pd.read_csv(bed_file, sep="\t", header=None,
                 names=["chr1", "start1", "end1", "chr2", "start2", "end2"])

# Map a position to its relative-position bin index
def pos_to_bin(chrom, pos):
    length = chrom_len_dict.get(chrom, None)
    if length is None or length == 0:
        return None
    rel = pos / length
    bin_idx = int(rel * bin_number)
    return min(bin_idx, bin_number - 1)

# ==== 1. Whole-genome density ====
bin_counts = np.zeros(bin_number)
for _, row in df.iterrows():
    bin1 = pos_to_bin(row["chr1"], row["start1"])
    bin2 = pos_to_bin(row["chr2"], row["start2"])
    if bin1 is not None:
        bin_counts[bin1] += 1
    if bin2 is not None:
        bin_counts[bin2] += 1

# Repeat the density vector to build a matrix
matrix_whole = np.tile(bin_counts, (repeat_rows, 1))
df_whole = pd.DataFrame(matrix_whole, columns=[f"bin{i+1}" for i in range(bin_number)])
df_whole.to_csv(out_whole, sep="\t", index=False)

# ==== 2. Per-chromosome density ====
perchr_rows = []
for chrom in chrom_list:
    bin_array = np.zeros(bin_number)
    # anchors where this chromosome appears as chr1 (start1) or chr2 (start2)
    df_chr1 = df[df["chr1"] == chrom]
    df_chr2 = df[df["chr2"] == chrom]
    for _, row in df_chr1.iterrows():
        bin_idx = pos_to_bin(chrom, row["start1"])
        if bin_idx is not None:
            bin_array[bin_idx] += 1
    for _, row in df_chr2.iterrows():
        bin_idx = pos_to_bin(chrom, row["start2"])
        if bin_idx is not None:
            bin_array[bin_idx] += 1
    perchr_row = [chrom] + list(bin_array)
    perchr_rows.append(perchr_row)

# Build the data frame and write it out
colnames = ["chrom"] + [f"bin{i+1}" for i in range(bin_number)]
df_perchr = pd.DataFrame(perchr_rows, columns=colnames)
df_perchr.to_csv(out_perchr, sep="\t", index=False)
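A quick way to see what pos_to_bin does, and why the min(bin_idx, bin_number - 1) clamp is there: each position is mapped to one of bin_num bins by its relative position, and a position sitting exactly at the chromosome end would fall into bin bin_num, so it is clamped back into the last bin. A tiny standalone check (pos_to_bin_demo is just a local copy of the same formula, with made-up numbers):

bin_number = 100
def pos_to_bin_demo(pos, length):
    return min(int(pos / length * bin_number), bin_number - 1)

print(pos_to_bin_demo(0, 1_000_000))          # 0  -> first bin
print(pos_to_bin_demo(500_000, 1_000_000))    # 50 -> middle of the chromosome
print(pos_to_bin_demo(1_000_000, 1_000_000))  # 99 -> clamped into the last bin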
Plotting in R:
library(pheatmap)
library(grid)
setwd("C:/Users/匡卓然/Desktop/FenshuChrVar/06.3D/loop")
####
a1 = read.table("Erothschildi.tijian.perchr.pheatmap.txt",sep = "\t",header = T,row.names = 1)
# columns are relative positions along the chromosome (pheatmap has no xlab argument)
p1 = pheatmap(a1, cluster_rows = F, cluster_cols = F, scale = "row",
              border_color = NA,
              show_colnames = FALSE, show_rownames = T)
pdf("Erothschildi.tijian.perchr.pheatmap.pdf", width = 8, height = 6)
print(p1)
dev.off()
####
a2 = read.table("Erothschildi.tijian.wg.pheatmap.txt",sep = "\t",header = T)
# columns are relative positions along the chromosome
p2 = pheatmap(a2, cluster_rows = F, cluster_cols = F, scale = "row",
              border_color = NA,
              show_colnames = FALSE, show_rownames = T)
pdf("Erothschildi.tijian.wg.pheatmap.pdf", width = 8, height = 1.5)
print(p2)
dev.off()
####
Everything below is kept only for the record:
0. Personal notes only,
meaning the only criterion is whether I myself can follow them.
Installation and usage of fithic are omitted here;
assume fithic has already been run.
The input file looks like this:

Six columns in two halves, giving the two intervals of each significant interaction.
#Start
awk '{if($1!=$4)print}' XX.10k.fithic.chr.bed > XX.10k.fithic.tijian.bed
#Process the input file
#to extract the inter-chromosomal (trans) interactions
cut -f1,2,3 XX.10k.fithic.tijian.bed > tmp1
cut -f4,5,6 XX.10k.fithic.tijian.bed > tmp2
cat tmp1 tmp2 | bedtools sort -i - | uniq -c > tmp3
awk '{print $2"\t"$3"\t"$4"\t"$1}' tmp3 > XX.10k.fithic.tijian.density
#XX.10k.fithic.tijian.density
#is the final output file; it will be used again later
rm tmp1 tmp2 tmp3
#Remove the intermediate files
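For the record, the same counting step (pool both anchors of every trans pair, then count how many times each interval occurs) can also be sketched in pandas; this is only an equivalent of the shell pipeline above, and the output file name is made up:

import pandas as pd

# read the trans-only interactions produced by the awk filter above
df = pd.read_csv("XX.10k.fithic.tijian.bed", sep="\t", header=None,
                 names=["chr1", "start1", "end1", "chr2", "start2", "end2"])

# stack the left and right anchors into one list of intervals (cat tmp1 tmp2)
cols = ["chrom", "start", "end"]
anchors = pd.concat([df[["chr1", "start1", "end1"]].set_axis(cols, axis=1),
                     df[["chr2", "start2", "end2"]].set_axis(cols, axis=1)],
                    ignore_index=True)

# count occurrences of each interval (sort | uniq -c), with the count in column 4
density = anchors.value_counts(cols).reset_index(name="count").sort_values(cols)
density.to_csv("XX.10k.fithic.tijian.density.pandas.tsv",
               sep="\t", header=False, index=False)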
Output file:

The result file looks like this.
Finally, draw a karyotype plot.
For plotting the karyotype, see: https://blog.csdn.net/u013429737/article/details/116121440

Final result
2. The plot above does not meet my needs; what I actually want is a meta-plot, in the style of:
ref: https://doi.org/10.1038/s41467-021-27091-0

Fig. S16 of that paper
#Start
#Get the length of each chromosome in the genome
bioawk -c fastx '{print $name,length($seq)}' XX.Chr.fa > XX.length
#Get the relative position of each significant interaction on its chromosome
#The relative position is defined as
#the interaction's position divided by the total chromosome length;
#e.g. an interaction at the very end of a chromosome sits at a position equal to the chromosome length, so its relative position is 1
python Find_A_in_B.v2.py XX.10k.fithic.tijian.density XX.length | awk '{print $4/$2"\t"$5/$2"\t"$6}' > tmp1
#The script is given at the end of this note
#The first two columns of tmp1 are now relative positions; to standardize them, keep two or three decimal places
#awk '{for(i = 1; i <= NF; i++) {printf("%.2f\t", $i)} {printf("\n")}}' tmp1 > tmp2 #keep two decimal places
#awk '{for(i = 1; i <= NF; i++) {printf("%.3f\t", $i)} {printf("\n")}}' tmp1 > tmp2 #keep three decimal places
#The two commands above are abandoned: printf rounds the values instead of just trimming the decimals
awk '{printf "%.3f %.3f %.3f\n", int($1*1000)/1000, int($2*1000)/1000, int($3*1000)/1000}' tmp1 > tmp2
#the 1000 in the awk command means keep three decimal places (by truncation, not rounding)
awk '{print $1"\t"$1+0.001"\t"$3}' tmp2 > tmp3
rm tmp1 tmp2
#Build the bed-like file

At this point tmp3 looks like this: the first two columns are the relative positions and the third column is the interaction count.
#Continuing from above
#Deduplicate and sum the counts
awk -F "\t" '{sum[$1"\t"$2]+=$3}END{for(c in sum){print c,sum[c]}}' tmp3 | sort -Vk 1 > tmp4
#Sorting is essential
#awk '{print $2-$1}' tmp4 | sort -u
#If this prints only the single value 0.001, everything is fine
awk '{print $3}' tmp4 | tr "\n" "," | sed 's/,$/\n/' > XX.meta.tijian.plot.csv
rm tmp3 tmp4
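The point of int($1*1000)/1000 in the awk above is truncation rather than rounding; a one-off check in Python (values made up) shows the difference, and why the plain printf "%.3f" versions were abandoned:

# truncation (what the awk does) vs rounding (what a bare printf "%.3f" does)
def truncate3(x):
    return int(x * 1000) / 1000

for x in [0.12345, 0.99999, 0.0006]:
    print(f"{x}  truncated -> {truncate3(x):.3f}   rounded -> {x:.3f}")
# 0.12345  truncated -> 0.123   rounded -> 0.123
# 0.99999  truncated -> 0.999   rounded -> 1.000
# 0.0006   truncated -> 0.000   rounded -> 0.001

In particular, truncation keeps every relative start position strictly below 1.000, which matters for the 0.000-0.999 grid used in section 3 below.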
#Next comes the R code
library(pheatmap)
a = read.csv("XX.meta.tijian.plot.csv", header = F)
#standardize (row-wise z-score)
b = t(a)
a = t(scale(b))
pheatmap(a, cluster_rows = F, cluster_cols = F)
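For the record, t(scale(b)) with b = t(a) is just a row-wise z-score (subtract each row's mean, divide by its sample standard deviation). The same thing in numpy, as a sketch with made-up counts:

import numpy as np

def zscore_rows(mat):
    # row-wise z-score, matching R's t(scale(t(mat))); R's scale() uses the
    # sample standard deviation, hence ddof=1
    mat = np.asarray(mat, dtype=float)
    return (mat - mat.mean(axis=1, keepdims=True)) / mat.std(axis=1, ddof=1, keepdims=True)

counts = np.array([[3., 1., 0., 7., 9.]])   # one density row
print(zscore_rows(counts))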

The resulting plot looks like this: the density clearly piles up at both ends of the chromosomes.
But there is a problem with one of the steps above,
the "deduplicate and sum" step:
awk -F "\t" '{sum[$1"\t"$2]+=$3}END{for(c in sum){print c,sum[c]}}' tmp3 | sort -Vk 1 > tmp4
(the sorting is still required).
Summing the counts may not be the right thing to do; for a meta-plot the counts should perhaps be averaged over chromosomes instead. I am leaving this here as an open point
and will revise it later.
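If I ever come back to this, averaging over chromosomes at each relative position (instead of summing) might look roughly like this; the input table per_chr_counts.tsv is hypothetical (one row per chromosome and relative position, with a count), and none of this is part of the current pipeline:

import pandas as pd

# hypothetical input: chrom <TAB> rel_pos <TAB> count, one row per chromosome/position
df = pd.read_csv("per_chr_counts.tsv", sep="\t", names=["chrom", "rel_pos", "count"])

# mean over chromosomes at each relative position, instead of a plain sum
meta = df.groupby("rel_pos")["count"].mean().sort_index()
meta.to_csv("meta_mean.tsv", sep="\t", header=False)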
3. The problem above could not be resolved, so I went back and adjusted the approach from section 1:
draw the same plot as in section 1, but replace the real coordinates and lengths with positions relative to the chromosome length.
The code is the same as in section 2, just wrapped in a for loop.
#Start
#Get the length of each chromosome in the genome
bioawk -c fastx '{print $name,length($seq)}' XX.Chr.fa > XX.length
python Find_A_in_B.v2.py XX.10k.fithic.tijian.density XX.length > tmp1
#Build a "mode" file listing every possible relative position from 0.000 to 0.999
#(fill.gap.py uses it later to give positions with no interactions a count of 0)
for i in {0..999}
do
echo $i >> tmp
done
awk '{print $1/1000}' tmp | awk '{printf "%.3f\n", int($1*1000)/1000}' > mode
rm tmp
#loop over the chromosomes (Chr1-Chr26 here)
for i in {1..26}
do
grep -w "Chr${i}" tmp1 | awk '{print $4/$2"\t"$5/$2"\t"$6}' > Chr${i}.tmp2
awk '{printf "%.3f %.3f %.3f\n", int($1*1000)/1000, int($2*1000)/1000, int($3*1000)/1000}' Chr${i}.tmp2 > Chr${i}.tmp3
awk '{print $1"\t"$1+0.001"\t"$3}' Chr${i}.tmp3 > Chr${i}.tmp4
awk -F "\t" '{sum[$1"\t"$2]+=$3}END{for(c in sum){print c,sum[c]}}' Chr${i}.tmp4 | sort -Vk 1 > Chr${i}.tmp5
python fill.gap.py Chr${i}.tmp5 mode | awk '{print $4}' | sed "1i Chr${i}" | tr "\n" "," | sed 's/,$/\n/' >> XX.tijian.everyChr.plot.csv
done
rm *tmp*
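The key step in this loop (and in fill.gap.py) is putting every chromosome's counts onto the same 0.000-0.999 grid, so that each line of XX.tijian.everyChr.plot.csv has exactly 1000 values. The same idea in pandas, as a rough sketch using one of the intermediate files from the loop (before the rm):

import pandas as pd

# full grid of relative positions, same content as the "mode" file
grid = [f"{i/1000:.3f}" for i in range(1000)]

# Chr1.tmp5 lines look like: rel_start<TAB>rel_end<SPACE>count
counts = pd.read_csv("Chr1.tmp5", sep=r"\s+",
                     names=["rel_start", "rel_end", "count"],
                     dtype={"rel_start": str})

# re-index onto the common grid; positions with no interactions become 0
row = counts.set_index("rel_start")["count"].reindex(grid, fill_value=0)
print(len(row))   # 1000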
#Plotting - the R code follows
library(pheatmap)
a = read.csv("XX.tijian.everyChr.plot.csv", header = F)
pheatmap(a, cluster_rows = F, cluster_cols = F)

The resulting plot is shown here.
#Find_A_in_B.v2.py
#For every line of file B (arg 2) whose first column matches the first column
#of some line in file A (arg 1), print the B line followed by the matching A line.
import sys

list1 = {}
# index file A by its first column
with open(sys.argv[1], 'r') as f:
    for line in f:
        line = line.strip()
        content = line.split('\t')
        name = content[0]
        if name in list1:
            list1[name].append(line)
        else:
            list1[name] = [line]

# walk file B and print every B line joined with each matching A line
with open(sys.argv[2], 'r') as f:
    for line in f:
        line = line.strip()
        content1 = line.split('\t')
        name1 = content1[0]
        if name1 in list1:
            for match in list1[name1]:
                print(line + '\t' + match)
#fill.gap.py
#Same join as Find_A_in_B.v2.py, but when a line of file B (arg 2, the "mode"
#grid of relative positions) has no match in file A (arg 1), it is still printed
#with a count of 0, so every relative position gets exactly one output line.
import sys

list1 = {}
# index file A by its first column
with open(sys.argv[1], 'r') as f:
    for line in f:
        line = line.strip()
        content = line.split('\t')
        name = content[0]
        if name in list1:
            list1[name].append(line)
        else:
            list1[name] = [line]

# walk file B; matched positions keep their counts, missing positions get 0
with open(sys.argv[2], 'r') as f:
    for line in f:
        line = line.strip()
        content1 = line.split('\t')
        name1 = content1[0]
        if name1 in list1:
            for match in list1[name1]:
                print(line + '\t' + match)
        else:
            # pad the line so the count lands in the same column ($4) as above
            print(line + '\t' + line + '\t' + line + '\t' + "0")