QOJ.ac
QOJ
ID | Problem | Submitter | Result | Time | Memory | Language | File size | Submit time | Judge time |
---|---|---|---|---|---|---|---|---|---|
#216880 | #7185. Poor Students | cardinal_city# | TL | 2159ms | 7088kb | C++23 | 21.9kb | 2023-10-16 05:00:05 | 2023-10-16 05:00:05 |
Judging History
answer
#include <bits/stdc++.h>
using namespace std;
// Short contest-style type aliases.
using ll = long long;
using ld = long double;
using pii = pair<int, int>;
using vi = vector<int>;
// Half-open loop over [a, b). Note 'a' is not parenthesized, so pass simple
// expressions only; 'b' is evaluated every iteration.
#define rep(i, a, b) for(int i = a; i < (b); ++i)
#define all(x) begin(x), end(x)
#define sz(x) (int)(x).size()
// Clamp-assign helpers: a = max/min(a, b). Trailing ';' means no ';' at use site.
#define smx(a, b) a = max(a, b);
#define smn(a, b) a = min(a, b);
#define pb push_back
// Redefine endl as '\n' to avoid a stream flush on every line.
#define endl '\n'
const ll MOD = 1e9 + 7;
const ld EPS = 1e-9;
// Time-seeded RNG; declared for convenience, unused in this solution.
mt19937 rng(time(0));
// Orders integer keys ascending by their value in an external container,
// breaking ties by the key itself. Typical use: dijkstra heap over dist[].
// Holds a non-owning pointer, so the container must outlive the comparator.
template <typename Container>
struct less_container {
    const Container* cont = nullptr;
    less_container() = default;
    less_container(const Container& cont) : cont(&cont) {}
    inline bool operator()(int u, int v) const {
        const auto& a = (*cont)[u];
        const auto& b = (*cont)[v];
        // lexicographic on (value, key), written with operator< only
        return a < b || (!(b < a) && u < v);
    }
};
// Orders integer keys descending by their value in an external container,
// breaking ties by the larger key. Mirror image of less_container.
// Holds a non-owning pointer, so the container must outlive the comparator.
template <typename Container>
struct greater_container {
    const Container* cont = nullptr;
    greater_container() = default;
    greater_container(const Container& cont) : cont(&cont) {}
    inline bool operator()(int u, int v) const {
        const auto& a = (*cont)[u];
        const auto& b = (*cont)[v];
        // (a, u) > (b, v) lexicographically, written with operator< only
        return b < a || (!(a < b) && v < u);
    }
};
// Indexed binary heap over the integers [0...N) with decrease-key support.
// A min-heap under the default compare; pass a custom Compare (e.g. a
// less_container over a distance array) for dijkstra-style usage.
// c is the heap array; id[u] is u's slot in c, or -1 when u is absent.
template <typename Compare = less<>>
struct binary_int_heap {
    vector<int> c, id;
    Compare comp;
    explicit binary_int_heap(int N = 0, const Compare& comp = Compare())
        : c(0, 0), id(N, -1), comp(comp) {}
    bool empty() const { return c.empty(); }
    size_t size() const { return c.size(); }
    bool contains(int u) const { return id[u] != -1; }
    // Best element; heap must be non-empty.
    int top() const {
        assert(!empty());
        return c[0];
    }
    // Insert u, which must not already be present.
    void push(int u) {
        assert(!contains(u));
        id[u] = c.size();
        c.push_back(u);
        sift_up(id[u]);
    }
    // Remove and return the best element.
    int pop() {
        assert(!empty());
        int best = c[0];
        c[0] = c.back();
        id[c[0]] = 0;
        id[best] = -1;
        c.pop_back();
        sift_down(0);
        return best;
    }
    // Call after u's key got better (moves u towards the root).
    void improve(int u) { assert(contains(u)), sift_up(id[u]); }
    // Call after u's key got worse (moves u towards the leaves).
    void decline(int u) { assert(contains(u)), sift_down(id[u]); }
    void push_or_improve(int u) { contains(u) ? improve(u) : push(u); }
    void push_or_decline(int u) { contains(u) ? decline(u) : push(u); }
    void clear() {
        for (int u : c) {
            id[u] = -1;
        }
        c.clear();
    }
    // Insert every element of [0...N) not currently in the heap.
    void fill() {
        int N = id.size();
        for (int u = 0; u < N; u++) {
            if (!contains(u)) {
                push(u);
            }
        }
    }
  private:
    static int parent(int i) { return (i - 1) / 2; }
    static int child(int i) { return 2 * i + 1; }
    // Swap the heap slots i and j, keeping id[] in sync.
    void exchange(int i, int j) {
        swap(id[c[i]], id[c[j]]);
        swap(c[i], c[j]);
    }
    void sift_up(int i) {
        while (i > 0) {
            int p = parent(i);
            if (!comp(c[i], c[p]))
                break; // parent is at least as good: heap property holds
            exchange(i, p);
            i = p;
        }
    }
    void sift_down(int i) {
        int S = c.size();
        int k;
        while ((k = child(i)) < S) {
            if (k + 1 < S && !comp(c[k], c[k + 1]))
                k++; // right child is at least as good as the left
            if (!comp(c[k], c[i]))
                break; // i beats its best child: done
            exchange(i, k);
            i = k;
        }
    }
};
// Pairing heap over the unique integers [0...N)
// By default a min-heap, but you'll usually need a custom compare.
// Elements are stored shifted by +1 internally so index 0 acts as a null /
// scratchpad node: link writes to node[0] are harmless and get ignored.
template <typename Compare = less<>>
struct pairing_int_heap {
struct node_t {
int parent = 0, child = 0, next = 0, prev = 0;
}; // elements are shifted by 1 to allow 0 to be used as a scratchpad
vector<node_t> node; // node[u+1] holds the links of element u
int root = 0; // shifted index of the top element; 0 means empty
Compare comp;
explicit pairing_int_heap(int N = 0, const Compare& comp = Compare())
: node(N + 1), comp(comp) {}
bool empty() const { return root == 0; }
// An element is in the heap iff its parent link is set (the root keeps -1).
bool contains(int u) const { return u++, node[u].parent != 0; }
int top() const { return root - 1; }
// Insert u; u must not already be in the heap.
void push(int u) {
assert(!contains(u)), u++;
node[u].parent = -1;
root = safe_meld(root, u);
}
// Remove and return the top element.
int pop() {
assert(!empty());
int u = root;
root = two_pass_pairing(u); // meld the old root's children together
node[root].parent = -1; // writes the scratchpad if the heap became empty
node[u] = node_t();
return u - 1;
}
// Notify the heap that u's key improved (moved towards the top).
void improve(int u) {
assert(!empty() && contains(u)), u++;
if (u != root && do_comp(u, node[u].parent)) {
take(u), root = meld(root, u); // detach u's subtree, re-meld with the root
}
}
void push_or_improve(int u) {
if (contains(u)) {
improve(u);
} else {
push(u);
}
}
// Re-heapify u after an arbitrary key change (implemented as erase + push).
void adjust(int u) {
erase(u);
push(u);
}
// Remove u from the heap; u may be anywhere, not just the root.
void erase(int u) {
assert(contains(u)), u++;
if (u == root) {
pop();
} else {
take(u);
int v = two_pass_pairing(u); // meld u's children into one subtree
root = safe_meld(root, v);
node[root].parent = -1;
node[u] = node_t();
}
}
void clear() {
if (!empty()) {
clear_rec(root), root = 0;
}
}
// Insert every element of [0...N) not currently in the heap.
void fill() {
for (int u = 0, N = node.size() - 1; u < N; u++) {
if (!contains(u)) {
push(u);
}
}
}
private:
// Compare two shifted indices through the user comparator on real values.
bool do_comp(int u, int v) const { return comp(u - 1, v - 1); }
int meld(int u, int v) { return do_comp(u, v) ? splice(u, v) : splice(v, u); }
// meld that tolerates empty (0) or equal arguments.
int safe_meld(int u, int v) {
if (u == 0 || v == 0 || u == v)
return u ? u : v;
return meld(u, v);
}
// Make v the first child of u; returns u.
int splice(int u, int v) {
node[node[u].child].prev = v;
node[v].next = node[u].child, node[u].child = v;
return node[v].prev = node[v].parent = u;
}
// Detach u from its parent's child list; u keeps its own subtree.
void take(int u) {
assert(node[u].parent > 0);
if (node[node[u].parent].child == u) {
node[node[u].parent].child = node[u].next;
} else {
node[node[u].prev].next = node[u].next;
}
node[node[u].next].prev = node[u].prev;
}
// Standard two-pass pairing: meld n's children in pairs left to right,
// then fold the pairs back right to left. Returns the new root, 0 if none.
int two_pass_pairing(int n) {
if (node[n].child == 0)
return 0;
int u = node[n].child, v = node[u].next, w;
while (v && node[v].next) {
w = node[node[v].next].next;
u = node[u].next = v = meld(v, node[v].next);
v = node[v].next = w;
}
u = node[n].child, v = node[u].next;
while (v) {
w = node[v].next, u = meld(u, v), v = w;
}
return u;
}
// Recursively reset every node in u's subtree to the detached state.
void clear_rec(int u) {
for (int v = node[u].child, w = node[v].next; v; v = w, w = node[v].next) {
clear_rec(v);
}
node[u] = node_t();
}
};
// Collection of R mergeable pairing heaps over the unique integers [0...N)
// By default min-heaps, but you'll usually need a custom compare.
// Each element lives in at most one heap at a time. Elements are stored
// shifted by +1 internally so index 0 acts as a null / scratchpad node.
template <typename Compare = less<>>
struct pairing_int_heaps {
    struct node_t {
        int parent = 0, child = 0, next = 0, prev = 0;
    }; // elements are shifted by 1 to allow 0 to be used as a scratchpad
    vector<int> root;    // root[h]: shifted index of heap h's top; 0 if empty
    vector<node_t> node; // node[u+1] holds the links of element u
    Compare comp;
    explicit pairing_int_heaps(int R = 0, int N = 0, const Compare& comp = Compare())
        : root(R, 0), node(N + 1), comp(comp) {}
    bool empty(int h) const { return root[h] == 0; }
    // An element is in some heap iff its parent link is set (roots keep -1).
    bool contains(int u) const { return u++, node[u].parent != 0; }
    int top(int h) const { return root[h] - 1; }
    // Insert u into heap h; u must not be in any heap.
    void push(int h, int u) {
        assert(!contains(u)), u++;
        node[u].parent = -1;
        root[h] = safe_meld(root[h], u);
    }
    // Remove and return the top element of heap h.
    int pop(int h) {
        assert(!empty(h));
        int u = root[h];
        root[h] = two_pass_pairing(u);
        node[root[h]].parent = -1; // writes the scratchpad if h became empty
        node[u] = node_t();
        return u - 1;
    }
    // Notify heap h that u's key improved (moved towards the top).
    void improve(int h, int u) {
        assert(!empty(h) && contains(u)), u++;
        if (u != root[h] && do_comp(u, node[u].parent)) {
            take(u), root[h] = meld(root[h], u);
        }
    }
    void push_or_improve(int h, int u) {
        if (contains(u)) {
            improve(h, u);
        } else {
            push(h, u);
        }
    }
    // Re-heapify u in heap h after an arbitrary key change.
    void adjust(int h, int u) {
        erase(h, u);
        push(h, u);
    }
    // Remove u from heap h; u may be anywhere, not just the root.
    void erase(int h, int u) {
        assert(!empty(h) && contains(u)), u++;
        if (u == root[h]) {
            pop(h); // BUGFIX: was pop() without the heap index, which cannot
                    // compile once this member is instantiated
        } else {
            take(u);
            int v = two_pass_pairing(u);
            root[h] = safe_meld(root[h], v);
            node[root[h]].parent = -1;
            node[u] = node_t();
        }
    }
    // Merge heap g into heap h, leaving g empty.
    void merge(int h, int g) {
        int r = safe_meld(root[h], root[g]);
        root[g] = 0, root[h] = r;
    }
    void clear(int h) {
        if (!empty(h)) {
            clear_rec(root[h]), root[h] = 0;
        }
    }
    // Push every loose element of [0...N) into heap h.
    void fill(int h) {
        for (int u = 0, N = node.size() - 1; u < N; u++) {
            if (!contains(u)) {
                push(h, u);
            }
        }
    }
    // Push element u into heap u for every loose element; requires R == N.
    void fill_each() {
        assert(root.size() + 1 == node.size());
        // BUGFIX: the loop bound was u <= N, which read root[N] and
        // node[N + 1] out of bounds on the final iteration.
        for (int h = 0, u = 0, N = node.size() - 1; u < N; h++, u++) {
            if (!contains(u)) {
                push(h, u);
            }
        }
    }
  private:
    // Compare two shifted indices through the user comparator on real values.
    bool do_comp(int u, int v) const { return comp(u - 1, v - 1); }
    int meld(int u, int v) { return do_comp(u, v) ? splice(u, v) : splice(v, u); }
    // meld that tolerates empty (0) or equal arguments.
    int safe_meld(int u, int v) {
        if (u == 0 || v == 0 || u == v)
            return u ? u : v;
        return meld(u, v);
    }
    // Make v the first child of u; returns u.
    int splice(int u, int v) {
        node[node[u].child].prev = v;
        node[v].next = node[u].child, node[u].child = v;
        return node[v].prev = node[v].parent = u;
    }
    // Detach u from its parent's child list; u keeps its own subtree.
    void take(int u) {
        assert(node[u].parent > 0);
        if (node[node[u].parent].child == u) {
            node[node[u].parent].child = node[u].next;
        } else {
            node[node[u].prev].next = node[u].next;
        }
        node[node[u].next].prev = node[u].prev;
    }
    // Two-pass pairing of n's children; returns the new root (0 if none).
    int two_pass_pairing(int n) {
        if (node[n].child == 0)
            return 0;
        int u = node[n].child, v = node[u].next, w;
        while (v && node[v].next) {
            w = node[node[v].next].next;
            u = node[u].next = v = meld(v, node[v].next);
            v = node[v].next = w;
        }
        u = node[n].child, v = node[u].next;
        while (v) {
            w = node[v].next, u = meld(u, v), v = w;
        }
        return u;
    }
    // Recursively reset every node in u's subtree to the detached state.
    void clear_rec(int u) {
        for (int v = node[u].child, w = node[v].next; v; v = w, w = node[v].next) {
            clear_rec(v);
        }
        node[u] = node_t();
    }
};
/**
 * A skew heap designed specifically for the minimum arborescence problem.
 * Might also be applicable to other problems requiring connected component contraction.
 *
 * Context: you want to represent a group of connected components in a graph with V nodes
 * and E edges, where edges contain values/costs and want to support the operation of
 * merging connected components efficiently, adding new edges, popping min edges,
 * and adding lazily a value to all edges in a subcomponent.
 *
 * Internally there are V "heaps" and E "nodes", which represent, respectively, the
 * represented graph's nodes and edges. The heaps correspond to connected components.
 *
 * Nodes are shifted by +1 internally; index 0 is a null/scratchpad node that
 * absorbs lazy pushed to missing children and is reset in pushdown().
 */
template <typename T, typename Compare = less<>>
struct lazy_skew_int_heaps {
struct node_t {
int child[2] = {};
T cost = {}, lazy = {}; // lazy: pending delta to add to this whole subtree
}; // elements are shifted by 1 to allow 0 to be used as a scratchpad
vector<int> root; // root[h]: shifted index of heap h's top node; 0 if empty
vector<node_t> node;
Compare comp;
explicit lazy_skew_int_heaps(int R = 0, int E = 0, const Compare& comp = Compare())
: root(R), node(E + 1), comp(comp) {}
bool empty(int h) const { return root[h] == 0; }
// Return (element, cost) at the top of heap h; flushes the root's lazy first.
auto top(int h) {
pushdown(root[h]);
return make_pair(root[h] - 1, node[root[h]].cost);
}
// Lazily add delta to the cost of every element in heap h.
void update(int h, T delta) {
assert(!empty(h));
node[root[h]].lazy += delta;
pushdown(root[h]);
}
// Insert element u with the given cost into heap h.
void push(int h, int u, T cost) {
assert(u >= 0), u++;
node[u].cost = cost;
root[h] = safe_meld(root[h], u);
}
// Remove the top element of heap h.
void pop(int h) {
assert(!empty(h));
pushdown(root[h]);
auto [l, r] = node[root[h]].child;
node[root[h]] = node_t();
root[h] = safe_meld(l, r);
}
void merge(int h, int a, int b) { // merge heaps a and b into position h
assert(h == a || h == b || root[h] == 0);
int r = safe_meld(root[a], root[b]);
root[a] = root[b] = 0, root[h] = r;
}
private:
// Apply a's pending lazy to its own cost and defer it to both children.
// Missing children are index 0, so the scratchpad absorbs their share and
// is zeroed again on the same line, keeping it permanently clean.
void pushdown(int a) {
auto [l, r] = node[a].child;
node[a].cost += node[a].lazy;
node[l].lazy += node[a].lazy;
node[r].lazy += node[a].lazy;
node[a].lazy = node[0].lazy = 0;
}
// meld that tolerates empty (0) or equal arguments.
int safe_meld(int u, int v) {
if (u == 0 || v == 0 || u == v)
return u ? u : v;
return meld(u, v);
}
// Skew-heap meld: the better root wins, the loser is melded into its right
// child, and the children are swapped (the skew step that keeps melds
// amortized logarithmic). Costs are flushed before comparing.
int meld(int a, int b) {
pushdown(a), pushdown(b);
if (comp(node[b].cost, node[a].cost)) {
swap(a, b);
}
swap(node[a].child[0], node[a].child[1] = safe_meld(b, node[a].child[1]));
return a;
}
};
// Edmonds-Karp augmenting paths for mincost flow. O(V+ElogV) or (V²) per augmentation
// For min-cost flow problems with one source, one sink. Requires pairing_int_heap
// Three initializers. You must call *one* before calling a mincost_flow() function
// dag_init(s, t) if the graph is a DAG
// spfa_init(s, t) if the graph has possibly negative costs
// dijkstra_init(s, t) if the graph has non-negative costs
// Usage:
// int s = ..., t = ...;
// mcmflow<int, long> mcf(V);
// for (edges...) { mcf.add(u, v, cap, cost); }
// bool st_path_exists = mcf.*_init(s, t);
// auto [flow, cost, augmentations] = mcf.mincost_flow*(s, t, ...);
template <typename Flow = int64_t, typename Cost = int64_t, typename FlowSum = Flow,
typename CostSum = Cost>
struct mcmflow {
struct Edge {
int node[2]; // {from, to}
Flow cap, flow = 0;
Cost cost;
};
int V, E = 0; // E counts residual arcs: two per add() call
vector<vector<int>> res; // res[u]: indices into edge[] of arcs leaving u
vector<Edge> edge; // arcs e and e^1 are residual twins of each other
// Note: heap's comparator stores a pointer to dist (declared below), which
// is already constructed (empty) by the time heap is initialized here.
explicit mcmflow(int V) : V(V), res(V), pi(V, 0), heap(V, dist) {}
// Add directed edge u->v; also creates the zero-capacity residual twin.
void add(int u, int v, Flow capacity, Cost cost) {
assert(0 <= u && u < V && 0 <= v && v < V && u != v && capacity > 0);
res[u].push_back(E++), edge.push_back({{u, v}, capacity, 0, cost});
res[v].push_back(E++), edge.push_back({{v, u}, 0, 0, -cost});
}
using heap_t = pairing_int_heap<less_container<vector<CostSum>>>;
vector<CostSum> dist, pi; // shortest reduced distances; Johnson potentials
vector<int> prev; // prev[v]: arc used to reach v on the shortest-path tree
heap_t heap;
static inline constexpr Flow flowinf = numeric_limits<Flow>::max() / 2;
static inline constexpr FlowSum flowsuminf = numeric_limits<FlowSum>::max() / 2;
static inline constexpr CostSum costsuminf = numeric_limits<CostSum>::max() / 3;
// First augmenting path on a DAG in O(V+E) with topological sort
bool dag_init(int s, int t) {
dist.assign(V, costsuminf);
prev.assign(V, -1);
dist[s] = 0;
vector<int> deg(V), topo(V);
int B = 0;
// In-degrees over forward arcs only; residual twins carry no capacity yet.
for (int e = 0; e < E; e += 2) {
int v = edge[e].node[1];
deg[v]++;
}
for (int u = 0; u < V; u++) {
if (deg[u] == 0) {
topo[B++] = u;
}
}
// Relax edges in topological order (topo is extended as degrees hit 0).
for (int i = 0; i < B; i++) {
int u = topo[i];
for (int e : res[u]) {
if (e % 2 == 0) { // forward edge
int v = edge[e].node[1];
CostSum w = dist[u] + edge[e].cost;
if (edge[e].flow < edge[e].cap && dist[v] > w) {
dist[v] = w;
prev[v] = e;
}
if (--deg[v] == 0) {
topo[B++] = v;
}
}
}
}
reprice();
return prev[t] != -1;
}
// First augmenting path with SPFA in O(V+E) expected time.
bool spfa_init(int s, int t) {
dist.assign(V, costsuminf);
prev.assign(V, -1);
dist[s] = 0;
vector<bool> in_queue(V, false);
list<int> Q;
Q.push_back(s);
do {
int u = Q.front();
Q.pop_front(), in_queue[u] = false;
for (auto e : res[u]) {
int v = edge[e].node[1];
CostSum w = dist[u] + edge[e].cost;
if (edge[e].flow < edge[e].cap && dist[v] > w) {
dist[v] = w;
prev[v] = e;
if (!in_queue[v]) {
// SLF heuristic: promising nodes go to the front of the deque.
if (Q.empty() || dist[v] <= dist[Q.front()]) {
Q.push_front(v);
} else {
Q.push_back(v);
}
in_queue[v] = true;
}
}
}
} while (!Q.empty());
reprice();
return prev[t] != -1;
}
// First augmenting path with dijkstra (also the regular augmentor) in O(E log V)
// Runs on reduced costs cost + pi[u] - pi[v]; correctness relies on the
// potentials pi being valid (from a prior *_init or dijkstra call).
bool dijkstra(int s, int t) {
dist.assign(V, costsuminf);
prev.assign(V, -1);
dist[s] = 0;
heap.push(s);
do {
auto u = heap.pop();
for (int e : res[u]) {
int v = edge[e].node[1];
CostSum w = min(dist[u] + pi[u] - pi[v] + edge[e].cost, costsuminf);
if (edge[e].flow < edge[e].cap && dist[v] > w) {
dist[v] = w;
prev[v] = e;
heap.push_or_improve(v);
}
}
} while (!heap.empty() && heap.top() != t); // stop once t is the minimum
heap.clear();
reprice();
return prev[t] != -1;
}
// Fold the just-computed distances into the potentials (Johnson's trick),
// saturating at costsuminf so unreachable nodes stay "infinitely far".
void reprice() {
for (int u = 0; u < V; u++) {
pi[u] = min(dist[u] + pi[u], costsuminf);
}
}
// Arc indices of the shortest s~>v path, listed from v back towards s.
auto path(int v) const {
vector<int> path;
while (prev[v] != -1) {
path.push_back(prev[v]);
v = edge[prev[v]].node[0];
}
return path;
}
// Augment until we get F flow, C cost, P augmenting paths, or hit minimum cut.
// Returns the piecewise-linear cost curve as (flow, cost, marginal cost) points.
auto slope(int s, int t, FlowSum F, CostSum C, int P) {
FlowSum sflow = 0;
CostSum scost = 0;
int paths = 0;
vector<tuple<FlowSum, CostSum, CostSum>> line = {{0, 0, 0}};
if (prev[t] == -1 || F <= 0 || C <= 0 || P <= 0) {
return line;
}
do {
auto augmenting_path = path(t);
Flow df = min(F - sflow, FlowSum(flowinf));
CostSum dc = 0;
// Bottleneck capacity and true (unreduced) cost of the path.
for (int e : augmenting_path) {
df = min(df, edge[e].cap - edge[e].flow);
dc += edge[e].cost;
}
if (dc > 0 && df > (C - scost) / dc) {
df = (C - scost) / dc;
if (df == 0) {
break; // can't augment without busting C
}
}
auto [lf, lc, ldc] = line.back();
if (dc == ldc) {
line.pop_back(); // same marginal cost: extend the previous segment
}
line.emplace_back(lf + df, lc + df * dc, dc);
sflow += df;
scost += df * dc;
paths++;
for (int e : augmenting_path) {
edge[e].flow += df;
edge[e ^ 1].flow -= df;
}
} while (sflow < F && scost < C && paths < P && dijkstra(s, t));
return line;
}
auto slope(int s, int t) { return slope(s, t, flowsuminf, costsuminf, INT_MAX); }
// Successive-shortest-path augmentation. Returns (flow, cost, path count).
auto mincost_flow(int s, int t, FlowSum F, CostSum C, int P) {
FlowSum sflow = 0;
CostSum scost = 0;
int paths = 0;
if (prev[t] == -1 || F <= 0 || C <= 0 || P <= 0) {
return make_tuple(sflow, scost, paths);
}
do {
auto augmenting_path = path(t);
Flow df = min(F - sflow, FlowSum(flowinf));
CostSum dc = 0;
// Bottleneck capacity and true (unreduced) cost of the path.
for (int e : augmenting_path) {
df = min(df, edge[e].cap - edge[e].flow);
dc += edge[e].cost;
}
if (dc > 0 && df > (C - scost) / dc) {
df = (C - scost) / dc;
if (df == 0) {
break; // can't augment without busting C
}
}
sflow += df;
scost += df * dc;
paths++;
for (int e : augmenting_path) {
edge[e].flow += df;
edge[e ^ 1].flow -= df;
}
} while (sflow < F && scost < C && paths < P && dijkstra(s, t));
return make_tuple(sflow, scost, paths);
}
auto mincost_flow(int s, int t) {
return mincost_flow(s, t, flowsuminf, costsuminf, INT_MAX);
}
// reset the flow network; you must call *_init() again
void clear_flow() {
for (int e = 0; e < E; e++) {
edge[e].flow = 0;
}
pi.assign(V, 0);
}
// Net flow on the i-th edge passed to add().
Flow get_flow(int e) const { return edge[2 * e].flow; }
// After the final (failed) search: u lies on the source side of a min cut.
bool left_of_mincut(int u) const { return dist[u] < costsuminf; }
};
int main() {
cin.tie(0)->sync_with_stdio(0);
int n, k; cin >> n >> k;
int s = 0;
int t = 1;
mcmflow<int, long long> mcf(n + k + 2);
for (int i = 0; i < n; i++) {
mcf.add(0, i + 2, 1, 0);
}
rep(i,0,n) {
rep(j,0,k) {
ll cij; cin >> cij;
mcf.add(i + 2, j + n + 2, 1, cij);
}
}
for (int i = 0; i < k; i++) {
ll aj; cin >> aj;
mcf.add(n + 2 + i, 1, aj, 0);
}
bool st_path_exists = mcf.dijkstra(s, t);
auto [flow, cost, augmentations] = mcf.mincost_flow(s, t);
cout << cost << endl;
}
Details
Tip: Click on the bar to expand more detailed information
Test #1:
score: 100
Accepted
time: 1ms
memory: 3620kb
input:
6 2 1 2 1 3 1 4 1 5 1 6 1 7 3 4
output:
12
result:
ok answer is '12'
Test #2:
score: 0
Accepted
time: 1ms
memory: 3548kb
input:
3 3 1 2 3 2 4 6 6 5 4 1 1 1
output:
8
result:
ok answer is '8'
Test #3:
score: 0
Accepted
time: 68ms
memory: 4264kb
input:
1000 10 734 303 991 681 755 155 300 483 702 442 237 256 299 675 671 757 112 853 759 233 979 340 288 377 718 199 935 666 576 842 537 363 592 349 494 961 864 727 84 813 340 78 600 492 118 421 478 925 552 617 517 589 716 7 928 638 258 297 706 787 266 746 913 978 436 859 701 951 137 44 815 336 471 720 2...
output:
92039
result:
ok answer is '92039'
Test #4:
score: 0
Accepted
time: 2159ms
memory: 7088kb
input:
5000 10 14 114 254 832 38 904 25 147 998 785 917 694 750 372 379 887 247 817 999 117 802 15 799 515 316 42 69 247 95 144 727 398 509 725 682 456 369 656 693 955 923 1 681 631 962 826 233 963 289 856 165 491 488 832 111 950 853 791 929 240 509 843 667 970 469 260 447 477 161 431 514 903 627 236 144 3...
output:
461878
result:
ok answer is '461878'
Test #5:
score: -100
Time Limit Exceeded
input:
10000 10 307 205 765 487 504 526 10 581 234 583 448 443 39 992 976 363 335 588 588 169 920 787 896 822 47 358 230 631 136 299 141 159 414 852 922 945 513 76 111 189 616 104 83 792 24 68 164 975 615 472 150 108 848 517 7 153 107 283 452 165 94 370 910 662 226 720 975 214 324 407 636 65 963 859 590 3 ...