Problem:
There is a matrix whose elements are all initially 0, and three kinds of operations:
1 x1 y1 x2 y2 v: add v to every element of the sub-matrix (x1,y1)-(x2,y2)
2 x1 y1 x2 y2 v: set every element of the sub-matrix (x1,y1)-(x2,y2) to v
3 x1 y1 x2 y2: query the sum, maximum and minimum of the elements of the sub-matrix (x1,y1)-(x2,y2)
Approach:
Since the matrix contains at most 10^6 elements in total, and every operation acts on consecutive rows, each touching a contiguous block of columns, we can flatten the matrix into a one-dimensional array by appending each row to the end of the previous one. An update or query on a sub-matrix then becomes, for each affected row, an operation on the corresponding interval of a 1-D segment tree (see the sketch below).
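To make the row-concatenation concrete, here is a minimal, self-contained sketch of the index mapping (the helper name pos and the sample numbers are only for illustration; the full program below inlines the same formula as (i-1)*c+y):

#include <cstdio>

// 1-based cell (x, y) of a matrix with c columns -> 1-based position in the flattened array
int pos(int x, int y, int c)
{
    return (x - 1) * c + y;   // row x occupies positions [(x-1)*c + 1, x*c]
}

int main()
{
    int c = 4;   // example: a matrix with 4 columns
    // The sub-matrix (2,2)-(3,4) becomes one contiguous interval per row on the 1-D tree:
    for (int i = 2; i <= 3; i++)
        printf("row %d -> [%d, %d]\n", i, pos(i, 2, c), pos(i, 4, c));
    // prints: row 2 -> [6, 8]  and  row 3 -> [10, 12]
    return 0;
}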
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <stack>
#include <queue>
#include <map>
#include <set>
#include <vector>
#include <math.h>
#include <bitset>
#include <algorithm>
#include <climits>
using namespace std;

#define lson 2*i
#define rson 2*i+1
#define LS l,mid,lson
#define RS mid+1,r,rson
#define UP(i,x,y) for(i=x;i<=y;i++)
#define DOWN(i,x,y) for(i=x;i>=y;i--)
#define MEM(a,x) memset(a,x,sizeof(a))
#define W(a) while(a)
#define gcd(a,b) __gcd(a,b)
#define LL long long
#define N 1000005
#define MOD 1000000007
#define INF 0x3f3f3f3f
#define EXP 1e-8
#define lowbit(x) (x&-x)

int ans_sum, ans_max, ans_min;

struct node
{
    int l, r;
    int sum, max, min;
    int add, set;   // lazy tags: pending add, pending assignment (-1 = none)
} a[N<<2];

// Push lazy tags down to the children; "set" is applied first and clears "add".
void pushdown(int i)
{
    if(a[i].set != -1)
    {
        a[lson].set = a[rson].set = a[i].set;
        a[lson].add = a[rson].add = 0;
        a[lson].min = a[rson].min = a[i].set;
        a[lson].max = a[rson].max = a[i].set;
        a[lson].sum = (a[lson].r - a[lson].l + 1) * a[i].set;
        a[rson].sum = (a[rson].r - a[rson].l + 1) * a[i].set;
        a[i].set = -1;
    }
    if(a[i].add > 0)
    {
        a[lson].add += a[i].add;
        a[rson].add += a[i].add;
        a[lson].min += a[i].add;
        a[rson].min += a[i].add;
        a[lson].max += a[i].add;
        a[rson].max += a[i].add;
        a[lson].sum += a[i].add * (a[lson].r - a[lson].l + 1);
        a[rson].sum += a[i].add * (a[rson].r - a[rson].l + 1);
        a[i].add = 0;
    }
}

// Recompute this node's summary from its children.
void pushup(int i)
{
    a[i].sum = a[lson].sum + a[rson].sum;
    a[i].max = max(a[lson].max, a[rson].max);
    a[i].min = min(a[lson].min, a[rson].min);
}

void build(int l, int r, int i)
{
    a[i].l = l;
    a[i].r = r;
    a[i].sum = 0;
    a[i].max = 0;
    a[i].min = 0;
    a[i].add = 0;
    a[i].set = -1;
    if(l == r)
        return;
    int mid = (l + r) >> 1;
    build(LS);
    build(RS);
}

// Assign val to every position in [l, r].
void set_data(int l, int r, int i, int val)
{
    if(a[i].l == l && a[i].r == r)
    {
        a[i].sum = val * (r - l + 1);
        a[i].min = val;
        a[i].max = val;
        a[i].set = val;
        a[i].add = 0;   // assignment overrides any pending add
        return;
    }
    pushdown(i);
    int mid = (a[i].l + a[i].r) >> 1;
    if(r <= mid)
        set_data(l, r, lson, val);
    else if(l > mid)
        set_data(l, r, rson, val);
    else
    {
        set_data(LS, val);
        set_data(RS, val);
    }
    pushup(i);
}

// Add val to every position in [l, r].
void add_data(int l, int r, int i, int val)
{
    if(a[i].l == l && a[i].r == r)
    {
        a[i].sum += val * (r - l + 1);
        a[i].min += val;
        a[i].max += val;
        a[i].add += val;
        return;
    }
    pushdown(i);
    int mid = (a[i].l + a[i].r) >> 1;
    if(r <= mid)
        add_data(l, r, lson, val);
    else if(l > mid)
        add_data(l, r, rson, val);
    else
    {
        add_data(LS, val);
        add_data(RS, val);
    }
    pushup(i);
}

// Accumulate the sum/max/min of [l, r] into the global answers.
void query(int l, int r, int i)
{
    if(l == a[i].l && a[i].r == r)
    {
        ans_sum += a[i].sum;
        ans_max = max(ans_max, a[i].max);
        ans_min = min(ans_min, a[i].min);
        return;
    }
    pushdown(i);
    int mid = (a[i].l + a[i].r) >> 1;
    if(r <= mid)
        query(l, r, lson);
    else if(l > mid)
        query(l, r, rson);
    else
    {
        query(LS);
        query(RS);
    }
    pushup(i);
}

int main()
{
    int op, i, x1, x2, y1, y2, v;
    int r, c, m;
    while(~scanf("%d%d%d", &r, &c, &m))
    {
        build(1, r*c, 1);   // flatten the r x c matrix into one array of r*c cells
        while(m--)
        {
            scanf("%d", &op);
            if(op == 1)
            {
                scanf("%d%d%d%d%d", &x1, &y1, &x2, &y2, &v);
                for(i = x1; i <= x2; i++)   // one 1-D interval per matrix row
                    add_data((i-1)*c + y1, (i-1)*c + y2, 1, v);
            }
            else if(op == 2)
            {
                scanf("%d%d%d%d%d", &x1, &y1, &x2, &y2, &v);
                for(i = x1; i <= x2; i++)
                    set_data((i-1)*c + y1, (i-1)*c + y2, 1, v);
            }
            else
            {
                scanf("%d%d%d%d", &x1, &y1, &x2, &y2);
                ans_sum = 0;
                ans_max = -INF;
                ans_min = INF;
                for(i = x1; i <= x2; i++)
                    query((i-1)*c + y1, (i-1)*c + y2, 1);
                printf("%d %d %d\n", ans_sum, ans_min, ans_max);
            }
        }
    }
    return 0;
}
UVA 11992: Fast Matrix Operations (Segment Tree)
Original post: http://blog.csdn.net/libin56842/article/details/46489841