op {
  graph_op_name: "SparseApplyFtrl"
  in_arg {
    name: "var"
    description: <<END
Should be from a Variable().
END
  }
  in_arg {
    name: "accum"
    description: <<END
Should be from a Variable().
END
  }
  in_arg {
    name: "linear"
    description: <<END
Should be from a Variable().
END
  }
  in_arg {
    name: "grad"
    description: <<END
The gradient.
END
  }
  in_arg {
    name: "indices"
    description: <<END
A vector of indices into the first dimension of var and accum.
END
  }
  in_arg {
    name: "lr"
    description: <<END
Scaling factor. Must be a scalar.
END
  }
  in_arg {
    name: "l1"
    description: <<END
L1 regularization. Must be a scalar.
END
  }
  in_arg {
    name: "l2"
    description: <<END
L2 regularization. Must be a scalar.
END
  }
  in_arg {
    name: "lr_power"
    description: <<END
Scaling factor. Must be a scalar.
END
  }
  out_arg {
    name: "out"
    description: <<END
Same as "var".
END
  }
  attr {
    name: "use_locking"
    description: <<END
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
END
  }
  summary: "Update relevant entries in '*var' according to the Ftrl-proximal scheme."
  description: <<END
That is, for rows for which we have grad, we update var, accum and linear as follows:
accum_new = accum + grad * grad
linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
accum = accum_new
END
}
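# For reference, a minimal NumPy sketch of the per-row update described in the
# op description above. This is an illustrative assumption, not the TensorFlow
# kernel itself; the function name `sparse_apply_ftrl` and the use of NumPy are
# hypothetical. It assumes grad has one row per entry of indices.
#
#   import numpy as np
#
#   def sparse_apply_ftrl(var, accum, linear, grad, indices,
#                         lr, l1, l2, lr_power):
#       # Each row g of grad updates row i of var, accum and linear.
#       for g, i in zip(grad, indices):
#           accum_new = accum[i] + g * g
#           # sigma = (accum_new^(-lr_power) - accum^(-lr_power)) / lr
#           linear[i] += g - (accum_new ** -lr_power
#                             - accum[i] ** -lr_power) / lr * var[i]
#           quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
#           # Apply the proximal step: shrink toward zero by l1, zeroing
#           # coordinates where |linear| <= l1.
#           var[i] = np.where(np.abs(linear[i]) > l1,
#                             (np.sign(linear[i]) * l1 - linear[i]) / quadratic,
#                             0.0)
#           accum[i] = accum_new
#       return var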