path: root/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt
op {
  graph_op_name: "SparseApplyProximalAdagrad"
  in_arg {
    name: "var"
    description: <<END
Should be from a Variable().
END
  }
  in_arg {
    name: "accum"
    description: <<END
Should be from a Variable().
END
  }
  in_arg {
    name: "lr"
    description: <<END
Learning rate. Must be a scalar.
END
  }
  in_arg {
    name: "l1"
    description: <<END
L1 regularization. Must be a scalar.
END
  }
  in_arg {
    name: "l2"
    description: <<END
L2 regularization. Must be a scalar.
END
  }
  in_arg {
    name: "grad"
    description: <<END
The gradient.
END
  }
  in_arg {
    name: "indices"
    description: <<END
A vector of indices into the first dimension of var and accum.
END
  }
  out_arg {
    name: "out"
    description: <<END
Same as "var".
END
  }
  attr {
    name: "use_locking"
    description: <<END
If True, updating of the var and accum tensors will be protected by
a lock; otherwise the behavior is undefined, but may exhibit less contention.
END
  }
  summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
  description: <<END
That is, for rows for which we have grad, we update var and accum as follows:
$$accum += grad * grad$$
$$prox\_v = var$$
$$prox\_v -= lr * grad * \frac{1}{\sqrt{accum}}$$
$$var = \frac{\mathrm{sign}(prox\_v)}{1 + lr * l2} * \max\{|prox\_v| - lr * l1, 0\}$$
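
For reference, a minimal NumPy sketch of this update (an illustrative
re-implementation of the formulas above; the function name
`sparse_apply_proximal_adagrad` is hypothetical, and this is not the actual
TensorFlow kernel):

```python
import numpy as np

def sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices):
    # Update only the rows of `var` and `accum` named by `indices`,
    # following the FOBOS formulas above. Arrays are modified in place.
    for g, i in zip(grad, indices):
        accum[i] += g * g
        # Unregularized Adagrad step.
        prox_v = var[i] - lr * g / np.sqrt(accum[i])
        # Proximal step: scale down by the L2 term, then soft-threshold
        # by the L1 term.
        var[i] = (np.sign(prox_v) / (1 + lr * l2)
                  * np.maximum(np.abs(prox_v) - lr * l1, 0))
    return var
```

In this sketch, duplicate entries in `indices` are simply applied one after
another, each seeing the accumulator state left by the previous one.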
END
}