diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 826623d6..dda9dd56 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,7 +1,7 @@ { "ImportPath": "k8s.io/sample-controller", - "GoVersion": "go1.11", - "GodepVersion": "v80", + "GoVersion": "go1.12", + "GodepVersion": "v80-k8s-r1", "Packages": [ "./..." ], @@ -160,1063 +160,1063 @@ }, { "ImportPath": "k8s.io/api/admissionregistration/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/apps/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/apps/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/apps/v1beta2", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/auditregistration/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/authentication/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/authentication/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/authorization/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/authorization/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/autoscaling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/autoscaling/v2beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/autoscaling/v2beta2", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/batch/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/batch/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/batch/v2alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/certificates/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/coordination/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/coordination/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/core/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/events/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/extensions/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev":
"abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/networking/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/networking/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/node/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/node/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/policy/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/rbac/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/rbac/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/rbac/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/scheduling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/scheduling/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/scheduling/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/settings/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/storage/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/storage/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/api/storage/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "abf273d0e3378f540359565745ae19a00bfd47e1" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/errors", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/meta", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/internalversion", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion", - "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion/queryparams", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/fields", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/labels", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/schema", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/json", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/selection", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/types", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/cache", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/clock", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/diff", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/errors", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/framer", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/intstr", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/json", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": 
"4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/naming", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/net", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/runtime", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/sets", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation/field", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/wait", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/yaml", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/version", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/pkg/watch", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "4519e5924e99cd805e0e5e1dc8e43170c3f94909" }, { "ImportPath": "k8s.io/client-go/discovery", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/discovery/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/apps", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1beta2", 
- "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/auditregistration", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/auditregistration/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v2beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v2beta2", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/batch", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/batch/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/batch/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/batch/v2alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/certificates", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/certificates/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/coordination", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/coordination/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/coordination/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/core", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/core/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/events", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/events/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/extensions", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/extensions/v1beta1", - "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/internalinterfaces", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/networking", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/networking/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/networking/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/node", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/node/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/node/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/policy", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/policy/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/rbac", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/scheduling", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/scheduling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/scheduling/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/scheduling/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/settings", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/settings/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/storage", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": 
"c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/scheme", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": 
"c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1beta1", - "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/node/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/node/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/node/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1beta1", - "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1beta2", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/auditregistration/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v2beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v2beta2", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/batch/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/batch/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/batch/v2alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/certificates/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/coordination/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/coordination/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/core/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/events/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/extensions/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/networking/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/networking/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/node/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/node/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/policy/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": 
"c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/scheduling/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/scheduling/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/scheduling/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/settings/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/pkg/version", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/exec", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/rest", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/rest/watch", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/testing", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/auth", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/cache", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/latest", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": 
"k8s.io/client-go/tools/clientcmd/api/v1", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/metrics", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/pager", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/record", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/record/util", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/tools/reference", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/transport", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/util/cert", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/util/connrotation", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/util/homedir", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/util/keyutil", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/util/retry", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/client-go/util/workqueue", - "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "Rev": "c1a984fa82ed6cdffe97561c24b9e3a432975c7f" }, { "ImportPath": "k8s.io/klog", diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go index 23712547..72e046bc 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=admissionregistration.k8s.io diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index cd84007b..c2817406 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index f763ac1a..63105778 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index ae4a1908..6c7d13d4 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta2 diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go index 5caa960a..851aed3a 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=auditregistration.k8s.io diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index ad5ba643..fda48c01 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index 2f824e72..a142c5fc 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index 5be4427e..4f676633 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index 88afc0fe..fab5f49d 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 70cb1607..07b94d7a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index 18dbf5a5..5bd7918d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta1 diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go index d5ac8a49..181b009d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta2 diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index 70cb1607..07b94d7a 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go index 5dd34c6d..17b674b6 100644 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index 0c4ec7f7..f2171222 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2alpha1 diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index 771a1294..d56b45df 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go index dbcff17a..8933afb6 100644 --- a/vendor/k8s.io/api/coordination/v1/doc.go +++ b/vendor/k8s.io/api/coordination/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=coordination.k8s.io diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go index cf14fc68..1a88b3b3 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=coordination.k8s.io diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 2c72ec2d..edc9b4d6 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -97,4 +97,10 @@ const ( // This annotation will be used to compute the in-cluster network programming latency SLI, see // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" + + // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated + // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. + // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or + // CSI Backend for a volume plugin on a specific node. + MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" ) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 1c2c6fc0..d4e880ef 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -16,6 +16,7 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // Package v1 is the v1 version of the core API. package v1 diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go index 99991994..0ba4d919 100644 --- a/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=events.k8s.io diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go index f763ac1a..63105778 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index b608499f..fc464631 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/k8s.io/api/networking/v1beta1/doc.go index 3330c00d..ff40fd5d 100644 --- a/vendor/k8s.io/api/networking/v1beta1/doc.go +++ b/vendor/k8s.io/api/networking/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go new file mode 100644 index 00000000..ecd814d6 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1alpha1 diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go new file mode 100644 index 00000000..16f5af92 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -0,0 +1,696 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto + + It has these top-level messages: + RuntimeClass + RuntimeClassList + RuntimeClassSpec +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } +func (*RuntimeClassSpec) ProtoMessage() {} +func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") + proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") +} +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) + i += copy(dAtA[i:], m.RuntimeHandler) + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RuntimeClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RuntimeClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*RuntimeClassSpec) Size() (n int) { + var l int + _ = l + l = len(m.RuntimeHandler) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassSpec{`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 421 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x6b, 0xd4, 0x40, + 0x14, 0xc7, 0x33, 0xb5, 0x85, 0x75, 0x5a, 0x4b, 0xc9, 0x41, 0xc2, 0x1e, 0xa6, 0x65, 0x0f, 0x52, + 0x04, 0x67, 0xdc, 0x22, 0xe2, 0x49, 0x30, 0x5e, 0x14, 0x2b, 0x42, 0xbc, 0x89, 0x07, 0x27, 0xc9, + 0x33, 0x19, 0xb3, 0xc9, 0x0c, 0x99, 0x49, 0xc0, 0x9b, 0x1f, 0xc1, 0x2f, 0xa4, 0xe7, 0x3d, 0xf6, + 0xd8, 0x53, 0x71, 0xe3, 0x17, 0x91, 0x99, 0x64, 0xbb, 0xdb, 0x2e, 0xc5, 0xbd, 0xe5, 0xbd, 0xf9, + 0xff, 0x7f, 0xef, 0xfd, 0x5f, 0xf0, 
0xab, 0xe2, 0x85, 0xa6, 0x42, 0xb2, 0xa2, 0x89, 0xa1, 0xae, + 0xc0, 0x80, 0x66, 0x2d, 0x54, 0xa9, 0xac, 0xd9, 0xf0, 0xc0, 0x95, 0x60, 0x95, 0x4c, 0x81, 0xb5, + 0x53, 0x3e, 0x53, 0x39, 0x9f, 0xb2, 0x0c, 0x2a, 0xa8, 0xb9, 0x81, 0x94, 0xaa, 0x5a, 0x1a, 0xe9, + 0x07, 0xbd, 0x92, 0x72, 0x25, 0xa8, 0x55, 0xd2, 0xa5, 0x72, 0xfc, 0x24, 0x13, 0x26, 0x6f, 0x62, + 0x9a, 0xc8, 0x92, 0x65, 0x32, 0x93, 0xcc, 0x19, 0xe2, 0xe6, 0xab, 0xab, 0x5c, 0xe1, 0xbe, 0x7a, + 0xd0, 0xf8, 0xd9, 0x6a, 0x64, 0xc9, 0x93, 0x5c, 0x54, 0x50, 0x7f, 0x67, 0xaa, 0xc8, 0x6c, 0x43, + 0xb3, 0x12, 0x0c, 0x67, 0xed, 0xc6, 0xf8, 0x31, 0xbb, 0xcb, 0x55, 0x37, 0x95, 0x11, 0x25, 0x6c, + 0x18, 0x9e, 0xff, 0xcf, 0xa0, 0x93, 0x1c, 0x4a, 0x7e, 0xdb, 0x37, 0xf9, 0x8d, 0xf0, 0x41, 0xd4, + 0x4b, 0x5e, 0xcf, 0xb8, 0xd6, 0xfe, 0x17, 0x3c, 0xb2, 0x4b, 0xa5, 0xdc, 0xf0, 0x00, 0x9d, 0xa0, + 0xd3, 0xfd, 0xb3, 0xa7, 0x74, 0x75, 0x8b, 0x6b, 0x36, 0x55, 0x45, 0x66, 0x1b, 0x9a, 0x5a, 0x35, + 0x6d, 0xa7, 0xf4, 0x43, 0xfc, 0x0d, 0x12, 0xf3, 0x1e, 0x0c, 0x0f, 0xfd, 0xf9, 0xd5, 0xb1, 0xd7, + 0x5d, 0x1d, 0xe3, 0x55, 0x2f, 0xba, 0xa6, 0xfa, 0xe7, 0x78, 0x57, 0x2b, 0x48, 0x82, 0x1d, 0x47, + 0x7f, 0x4c, 0xef, 0xba, 0x34, 0x5d, 0xdf, 0xeb, 0xa3, 0x82, 0x24, 0x3c, 0x18, 0xb8, 0xbb, 0xb6, + 0x8a, 0x1c, 0x65, 0xf2, 0x0b, 0xe1, 0xa3, 0x75, 0xe1, 0xb9, 0xd0, 0xc6, 0xff, 0xbc, 0x11, 0x82, + 0x6e, 0x17, 0xc2, 0xba, 0x5d, 0x84, 0xa3, 0x61, 0xd4, 0x68, 0xd9, 0x59, 0x0b, 0xf0, 0x0e, 0xef, + 0x09, 0x03, 0xa5, 0x0e, 0x76, 0x4e, 0xee, 0x9d, 0xee, 0x9f, 0x3d, 0xda, 0x2e, 0x41, 0xf8, 0x60, + 0x40, 0xee, 0xbd, 0xb5, 0xe6, 0xa8, 0x67, 0x4c, 0xa2, 0x9b, 0xeb, 0xdb, 0x64, 0xfe, 0x4b, 0x7c, + 0x38, 0xfc, 0xb6, 0x37, 0xbc, 0x4a, 0x67, 0x50, 0xbb, 0x10, 0xf7, 0xc3, 0x87, 0x03, 0xe1, 0x30, + 0xba, 0xf1, 0x1a, 0xdd, 0x52, 0x87, 0x74, 0xbe, 0x20, 0xde, 0xc5, 0x82, 0x78, 0x97, 0x0b, 0xe2, + 0xfd, 0xe8, 0x08, 0x9a, 0x77, 0x04, 0x5d, 0x74, 0x04, 0x5d, 0x76, 0x04, 0xfd, 0xe9, 0x08, 0xfa, + 0xf9, 0x97, 0x78, 0x9f, 0x46, 0xcb, 0x35, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x94, 0x34, 0x0e, + 0xef, 0x30, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto new file mode 100644 index 00000000..ca4e5e53 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.node.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. 
RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + optional RuntimeClassSpec spec = 2; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated RuntimeClass items = 2; +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. The RuntimeClassSpec is immutable. +message RuntimeClassSpec { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + optional string runtimeHandler = 1; +} + diff --git a/vendor/k8s.io/api/node/v1alpha1/register.go b/vendor/k8s.io/api/node/v1alpha1/register.go new file mode 100644 index 00000000..b6082142 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go new file mode 100644 index 00000000..2ce67c11 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. The RuntimeClassSpec is immutable.
+type RuntimeClassSpec struct { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..a51fa525 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_RuntimeClassSpec = map[string]string{ + "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", + "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", +} + +func (RuntimeClassSpec) SwaggerDoc() map[string]string { + return map_RuntimeClassSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..0d63110b --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,101 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. 
+func (in *RuntimeClass) DeepCopy() *RuntimeClass { + if in == nil { + return nil + } + out := new(RuntimeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RuntimeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { + if in == nil { + return nil + } + out := new(RuntimeClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassSpec. +func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { + if in == nil { + return nil + } + out := new(RuntimeClassSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go new file mode 100644 index 00000000..f67a379c --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1beta1 diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go new file mode 100644 index 00000000..27251a8a --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -0,0 +1,564 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto + + It has these top-level messages: + RuntimeClass + RuntimeClassList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") +} +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) + i += copy(dAtA[i:], m.Handler) + return i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RuntimeClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Handler) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RuntimeClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e 
:= range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Handler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > 
l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 389 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6a, 0xdb, 0x40, + 0x14, 0x85, 0x35, 0x2e, 0xc6, 0xae, 0xdc, 0x52, 0xa3, 0x4d, 0x8d, 0x17, 0x63, 0x63, 0x28, 0xb8, + 0x0b, 0xcf, 0xd4, 0xa6, 0x94, 0x2e, 0x8b, 0xba, 0x69, 0x4b, 0x4b, 0x41, 0xcb, 0x90, 0x45, 0x46, + 0xd2, 0x8d, 0x34, 0x91, 0xa5, 0x11, 0x9a, 0x91, 0x20, 0xbb, 0x3c, 0x42, 0xf6, 0x79, 0x95, 0x3c, + 0x80, 0x97, 0x5e, 0x7a, 0x65, 0x62, 0xe5, 0x45, 0x82, 0x7e, 0xfc, 0x43, 0x8c, 0x49, 0x76, 0xba, + 0xe7, 0x9e, 0x73, 0xee, 0x87, 0x18, 0xfd, 0x47, 0xf0, 0x5d, 0x12, 0x2e, 0x68, 0x90, 0xda, 0x90, + 0x44, 0xa0, 0x40, 0xd2, 0x0c, 0x22, 0x57, 0x24, 0xb4, 0x5e, 0xb0, 0x98, 0xd3, 0x48, 0xb8, 0x40, + 0xb3, 0xa9, 0x0d, 0x8a, 0x4d, 0xa9, 0x07, 0x11, 0x24, 0x4c, 0x81, 0x4b, 0xe2, 0x44, 0x28, 0x61, + 0x7c, 0xac, 0x8c, 0x84, 0xc5, 0x9c, 0x14, 0x46, 0x52, 0x1b, 0xfb, 0x13, 0x8f, 0x2b, 0x3f, 0xb5, + 0x89, 0x23, 0x42, 0xea, 0x09, 0x4f, 0xd0, 0xd2, 0x6f, 0xa7, 0x97, 0xe5, 0x54, 0x0e, 0xe5, 0x57, + 0xd5, 0xd3, 0xff, 0xba, 0x3f, 0x18, 0x32, 0xc7, 0xe7, 0x11, 0x24, 0xd7, 0x34, 0x0e, 0xbc, 0x42, + 0x90, 0x34, 0x04, 0xc5, 0x68, 0x76, 0x74, 0xbd, 0x4f, 0x4f, 0xa5, 0x92, 0x34, 0x52, 0x3c, 0x84, + 0xa3, 0xc0, 0xb7, 0x97, 0x02, 0xd2, 0xf1, 0x21, 0x64, 0xcf, 0x73, 0xa3, 0x3b, 0xa4, 0xbf, 0xb3, + 0x2a, 0xcb, 0xcf, 0x39, 0x93, 0xd2, 0xb8, 0xd0, 0xdb, 0x05, 0x94, 0xcb, 0x14, 0xeb, 0xa1, 0x21, + 0x1a, 0x77, 0x66, 0x5f, 0xc8, 0xfe, 0x57, 0xec, 0xba, 0x49, 0x1c, 0x78, 0x85, 0x20, 0x49, 0xe1, + 0x26, 0xd9, 0x94, 0xfc, 0xb7, 0xaf, 0xc0, 0x51, 0xff, 0x40, 0x31, 0xd3, 0x58, 0xac, 0x07, 0x5a, + 0xbe, 0x1e, 0xe8, 0x7b, 0xcd, 0xda, 0xb5, 0x1a, 0x9f, 0xf5, 0x96, 0xcf, 0x22, 0x77, 0x0e, 0x49, + 0xaf, 0x31, 0x44, 0xe3, 0xb7, 0xe6, 0x87, 0xda, 0xde, 0xfa, 0x55, 0xc9, 0xd6, 0x76, 0x3f, 0xba, + 0x47, 0x7a, 0xf7, 0x90, 0xee, 0x2f, 0x97, 0xca, 0x38, 0x3f, 0x22, 0x24, 0xaf, 0x23, 0x2c, 0xd2, + 0x25, 0x5f, 0xb7, 0x3e, 0xd8, 0xde, 0x2a, 0x07, 0x74, 0x7f, 0xf4, 0x26, 0x57, 0x10, 0xca, 0x5e, + 0x63, 0xf8, 0x66, 0xdc, 0x99, 0x7d, 0x22, 0x27, 0xde, 0x01, 0x39, 0xe4, 0x32, 0xdf, 0xd7, 0x8d, + 0xcd, 0xdf, 0x45, 0xd6, 0xaa, 0x2a, 0xcc, 0xc9, 0x62, 0x83, 0xb5, 0xe5, 0x06, 0x6b, 0xab, 0x0d, + 0xd6, 0x6e, 0x72, 0x8c, 0x16, 0x39, 0x46, 0xcb, 0x1c, 0xa3, 0x55, 0x8e, 0xd1, 0x43, 0x8e, 0xd1, + 0xed, 0x23, 0xd6, 0xce, 0x5a, 0x75, 0xe3, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x93, 0x68, 0xe5, + 0x0d, 0xb5, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto new file mode 100644 index 00000000..9082fbd3 --- /dev/null +++ 
b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.node.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must conform to the DNS Label (RFC 1123) requirements, and is + // immutable. + optional string handler = 2; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated RuntimeClass items = 2; +} + diff --git a/vendor/k8s.io/api/node/v1beta1/register.go b/vendor/k8s.io/api/node/v1beta1/register.go new file mode 100644 index 00000000..3c3b61ba --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping package-scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go new file mode 100644 index 00000000..993c6e50 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node.
+ // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must conform to the DNS Label (RFC 1123) requirements, and is + // immutable. + Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..8bfa304e --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..98b5d480 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,84 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { + if in == nil { + return nil + } + out := new(RuntimeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RuntimeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { + if in == nil { + return nil + } + out := new(RuntimeClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
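For orientation, a minimal sketch of why these generated deepcopy helpers matter downstream: objects handed out by informer caches are shared, so callers must mutate only a copy. The object and field values below are illustrative, not part of this patch:

package main

import (
	"fmt"

	nodev1beta1 "k8s.io/api/node/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Pretend this object was read from a shared informer cache.
	cached := &nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gvisor"},
		Handler:    "runsc",
	}
	// Never mutate a cache object in place; edit a deep copy instead.
	desired := cached.DeepCopy()
	desired.Handler = "runsc-v2"
	fmt.Println(cached.Handler, desired.Handler) // prints: runsc runsc-v2
}
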
+func (in *RuntimeClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index d1062837..e68544dd 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index d268804e..8d212294 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 2e7b2016..70d3c0e9 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index 81bdb731..8f920749 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go index 45a969a6..3be300cf 100644 --- a/vendor/k8s.io/api/scheduling/v1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index 9a00a09c..476ab6f6 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go index 3cfee4f6..a3e8eeaa 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index 9bc2b9fd..80a21b6d 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index ff8bb34c..5731541d 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go index c6a9b9e2..dd7b9b2d 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index a5eafb2c..7bb7bb8d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 88ead621..b3a04300 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -38,6 +38,7 @@ import ( extensions "k8s.io/client-go/informers/extensions" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" networking "k8s.io/client-go/informers/networking" + node "k8s.io/client-go/informers/node" policy "k8s.io/client-go/informers/policy" rbac "k8s.io/client-go/informers/rbac" scheduling "k8s.io/client-go/informers/scheduling" @@ -198,6 +199,7 @@ type SharedInformerFactory interface { Events() events.Interface Extensions() extensions.Interface Networking() networking.Interface + Node() node.Interface Policy() policy.Interface Rbac() rbac.Interface Scheduling() scheduling.Interface @@ -249,6 +251,10 @@ func (f *sharedInformerFactory) Networking() networking.Interface { return networking.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Node() node.Interface { + return node.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Policy() policy.Interface { return policy.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index cf4e9ff0..fd5811cd 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -40,6 +40,8 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -217,6 +219,14 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil + // Group=node.k8s.io, Version=v1alpha1 + case nodev1alpha1.SchemeGroupVersion.WithResource("runtimeclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1alpha1().RuntimeClasses().Informer()}, nil + + // Group=node.k8s.io, Version=v1beta1 + case nodev1beta1.SchemeGroupVersion.WithResource("runtimeclasses"): + return &genericInformer{resource: 
resource.GroupResource(), informer: f.Node().V1beta1().RuntimeClasses().Informer()}, nil + // Group=policy, Version=v1beta1 case policyv1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil diff --git a/vendor/k8s.io/client-go/informers/node/interface.go b/vendor/k8s.io/client-go/informers/node/interface.go new file mode 100644 index 00000000..97736937 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/interface.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package node + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "k8s.io/client-go/informers/node/v1alpha1" + v1beta1 "k8s.io/client-go/informers/node/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go new file mode 100644 index 00000000..c5644295 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
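The factory wiring above exposes the new group as factory.Node(). A minimal sketch of consuming it, assuming the program runs in-cluster; the ten-minute resync period is arbitrary:

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes an in-cluster environment
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	// The Node() group and its RuntimeClasses() informer are what this patch adds.
	rcInformer := factory.Node().V1beta1().RuntimeClasses()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	_ = rcInformer.Lister() // the lister reads from the synced local cache
}
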
+ +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // RuntimeClasses returns a RuntimeClassInformer. + RuntimeClasses() RuntimeClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// RuntimeClasses returns a RuntimeClassInformer. +func (v *version) RuntimeClasses() RuntimeClassInformer { + return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go new file mode 100644 index 00000000..31edf930 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + nodev1alpha1 "k8s.io/api/node/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/node/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// RuntimeClassInformer provides access to a shared informer and lister for +// RuntimeClasses. +type RuntimeClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RuntimeClassLister +} + +type runtimeClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
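The tweakListOptions parameter threaded through the constructor below lets a caller narrow the informer's List and Watch calls. A sketch under assumptions: the helper name newFilteredRCInformer, the 30-second resync, and the label selector are all hypothetical:

package rcinformer

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers/node/v1alpha1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newFilteredRCInformer builds an informer restricted to RuntimeClasses
// matching the given label selector.
func newFilteredRCInformer(client kubernetes.Interface, selector string) cache.SharedIndexInformer {
	tweak := func(options *metav1.ListOptions) {
		options.LabelSelector = selector // applied to both List and Watch
	}
	return v1alpha1.NewFilteredRuntimeClassInformer(client, 30*time.Second, cache.Indexers{}, tweak)
}
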
+func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1alpha1().RuntimeClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1alpha1().RuntimeClasses().Watch(options) + }, + }, + &nodev1alpha1.RuntimeClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&nodev1alpha1.RuntimeClass{}, f.defaultInformer) +} + +func (f *runtimeClassInformer) Lister() v1alpha1.RuntimeClassLister { + return v1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go new file mode 100644 index 00000000..44a1defb --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // RuntimeClasses returns a RuntimeClassInformer. + RuntimeClasses() RuntimeClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// RuntimeClasses returns a RuntimeClassInformer. +func (v *version) RuntimeClasses() RuntimeClassInformer { + return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go new file mode 100644 index 00000000..6972993a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + nodev1beta1 "k8s.io/api/node/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/node/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// RuntimeClassInformer provides access to a shared informer and lister for +// RuntimeClasses. +type RuntimeClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RuntimeClassLister +} + +type runtimeClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
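The v1beta1 constructor below is identical in shape to the v1alpha1 one; what consumers typically layer on top is an event handler. A sketch with illustrative handler bodies and resync period:

package rcwatch

import (
	"fmt"
	"time"

	nodev1beta1 "k8s.io/api/node/v1beta1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchRuntimeClasses logs RuntimeClass add and delete events until stop closes.
func watchRuntimeClasses(client kubernetes.Interface, stop <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	informer := factory.Node().V1beta1().RuntimeClasses().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("added:", obj.(*nodev1beta1.RuntimeClass).Name)
		},
		DeleteFunc: func(obj interface{}) {
			// Deletes may arrive as tombstones; a real controller would unwrap them.
			if rc, ok := obj.(*nodev1beta1.RuntimeClass); ok {
				fmt.Println("deleted:", rc.Name)
			}
		},
	})
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	<-stop
}
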
+func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1beta1().RuntimeClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NodeV1beta1().RuntimeClasses().Watch(options) + }, + }, + &nodev1beta1.RuntimeClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&nodev1beta1.RuntimeClass{}, f.defaultInformer) +} + +func (f *runtimeClassInformer) Lister() v1beta1.RuntimeClassLister { + return v1beta1.NewRuntimeClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index dbaeffb1..fb889e6d 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -43,6 +43,8 @@ import ( extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" @@ -83,6 +85,8 @@ type Interface interface { ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface + NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface + NodeV1beta1() nodev1beta1.NodeV1beta1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface @@ -123,6 +127,8 @@ type Clientset struct { extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client networkingV1 *networkingv1.NetworkingV1Client networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client + nodeV1beta1 *nodev1beta1.NodeV1beta1Client policyV1beta1 *policyv1beta1.PolicyV1beta1Client rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client @@ -251,6 +257,16 @@ func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Inter return c.networkingV1beta1 } +// NodeV1alpha1 retrieves the NodeV1alpha1Client +func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { + return c.nodeV1alpha1 +} + +// NodeV1beta1 retrieves the NodeV1beta1Client +func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { + return c.nodeV1beta1 +} + // PolicyV1beta1 retrieves the PolicyV1beta1Client func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { 
return c.policyV1beta1 @@ -414,6 +430,14 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -493,6 +517,8 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) + cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) + cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) cs.rbacV1 = rbacv1.NewForConfigOrDie(c) cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) @@ -535,6 +561,8 @@ func New(c rest.Interface) *Clientset { cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) + cs.nodeV1alpha1 = nodev1alpha1.New(c) + cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 9a13a650..f4ac2b8b 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -70,6 +70,10 @@ import ( fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake" + nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + fakenodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake" + nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" + fakenodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1/fake" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake" rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" @@ -252,6 +256,16 @@ func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Inter return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake} } +// NodeV1alpha1 retrieves the NodeV1alpha1Client +func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { + return &fakenodev1alpha1.FakeNodeV1alpha1{Fake: &c.Fake} +} + +// NodeV1beta1 retrieves the NodeV1beta1Client +func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { + return &fakenodev1beta1.FakeNodeV1beta1{Fake: &c.Fake} +} + // PolicyV1beta1 retrieves the PolicyV1beta1Client func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 77056d83..62e07afb 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -42,6 +42,8 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1alpha1 
"k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -87,6 +89,8 @@ var localSchemeBuilder = runtime.SchemeBuilder{ extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, + nodev1alpha1.AddToScheme, + nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, rbacv1.AddToScheme, rbacv1beta1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 65968335..8346d26a 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -42,6 +42,8 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -87,6 +89,8 @@ var localSchemeBuilder = runtime.SchemeBuilder{ extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, networkingv1beta1.AddToScheme, + nodev1alpha1.AddToScheme, + nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, rbacv1.AddToScheme, rbacv1beta1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go new file mode 100644 index 00000000..df51baa4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go new file mode 100644 index 00000000..16f44399 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go new file mode 100644 index 00000000..21ab9de3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNodeV1alpha1 struct { + *testing.Fake +} + +func (c *FakeNodeV1alpha1) RuntimeClasses() v1alpha1.RuntimeClassInterface { + return &FakeRuntimeClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNodeV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go new file mode 100644 index 00000000..3c8b0098 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/node/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRuntimeClasses implements RuntimeClassInterface +type FakeRuntimeClasses struct { + Fake *FakeNodeV1alpha1 +} + +var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", Version: "v1alpha1", Resource: "runtimeclasses"} + +var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1alpha1", Kind: "RuntimeClass"} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *FakeRuntimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RuntimeClass), err +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *FakeRuntimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1alpha1.RuntimeClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RuntimeClassList{ListMeta: obj.(*v1alpha1.RuntimeClassList).ListMeta} + for _, item := range obj.(*v1alpha1.RuntimeClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *FakeRuntimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *FakeRuntimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RuntimeClass), err +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *FakeRuntimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RuntimeClass), err +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *FakeRuntimeClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRuntimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RuntimeClassList{}) + return err +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *FakeRuntimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1alpha1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RuntimeClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..fcef31d1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go new file mode 100644 index 00000000..863f2d4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/node/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1alpha1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1alpha1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1alpha1Client for the given RESTClient. 
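A sketch of using the client declared above through the aggregate clientset, assuming the caller supplies a kubeconfig path; the helper name listRuntimeClasses is hypothetical:

package rclist

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// listRuntimeClasses prints every v1alpha1 RuntimeClass in the cluster.
func listRuntimeClasses(kubeconfig string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// RuntimeClasses are cluster-scoped, so there is no namespace argument.
	list, err := cs.NodeV1alpha1().RuntimeClasses().List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, rc := range list.Items {
		fmt.Println(rc.Name, rc.Spec.RuntimeHandler)
	}
	return nil
}
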
+func New(c rest.Interface) *NodeV1alpha1Client { + return &NodeV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go new file mode 100644 index 00000000..044460ec --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/node/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. +type RuntimeClassInterface interface { + Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go new file mode 100644 index 00000000..77110195 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go new file mode 100644 index 00000000..16f44399 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go new file mode 100644 index 00000000..36976ce5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNodeV1beta1 struct { + *testing.Fake +} + +func (c *FakeNodeV1beta1) RuntimeClasses() v1beta1.RuntimeClassInterface { + return &FakeRuntimeClasses{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNodeV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go new file mode 100644 index 00000000..201d7426 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/api/node/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRuntimeClasses implements RuntimeClassInterface +type FakeRuntimeClasses struct { + Fake *FakeNodeV1beta1 +} + +var runtimeclassesResource = schema.GroupVersionResource{Group: "node.k8s.io", Version: "v1beta1", Resource: "runtimeclasses"} + +var runtimeclassesKind = schema.GroupVersionKind{Group: "node.k8s.io", Version: "v1beta1", Kind: "RuntimeClass"} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *FakeRuntimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RuntimeClass), err +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *FakeRuntimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1beta1.RuntimeClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.RuntimeClassList{ListMeta: obj.(*v1beta1.RuntimeClassList).ListMeta} + for _, item := range obj.(*v1beta1.RuntimeClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *FakeRuntimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *FakeRuntimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RuntimeClass), err +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *FakeRuntimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RuntimeClass), err +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *FakeRuntimeClasses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRuntimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.RuntimeClassList{}) + return err +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *FakeRuntimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1beta1.RuntimeClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.RuntimeClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go new file mode 100644 index 00000000..669dd028 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go new file mode 100644 index 00000000..52683e20 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/node/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1beta1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1beta1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1beta1Client struct { + restClient rest.Interface +} + +func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1beta1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1beta1Client { + return &NodeV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go new file mode 100644 index 00000000..b3f7c497 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/node/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. 
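The NewForConfig/setConfigDefaults pair above is the standard client-gen wiring: it points the REST client at the `/apis` path with group-version `node.k8s.io/v1beta1` before building it. A minimal out-of-cluster sketch, assuming a kubeconfig path (illustrative) and a cluster that serves the v1beta1 RuntimeClass API:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodeclient "k8s.io/client-go/kubernetes/typed/node/v1beta1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Out-of-cluster config; the kubeconfig path is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// NewForConfig applies the group defaults shown above, then builds the REST client.
	nc, err := nodeclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// RuntimeClass is cluster-scoped, so requests carry no namespace.
	rcs, err := nc.RuntimeClasses().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, rc := range rcs.Items {
		fmt.Println(rc.Name, "->", rc.Handler)
	}
}
```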
+type RuntimeClassInterface interface { + Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..a65c208f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// RuntimeClassListerExpansion allows custom methods to be added to +// RuntimeClassLister. +type RuntimeClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go new file mode 100644 index 00000000..af3f02b9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/node/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RuntimeClassLister helps list RuntimeClasses. +type RuntimeClassLister interface { + // List lists all RuntimeClasses in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) + // Get retrieves the RuntimeClass from the index for a given name. + Get(name string) (*v1alpha1.RuntimeClass, error) + RuntimeClassListerExpansion +} + +// runtimeClassLister implements the RuntimeClassLister interface. 
+type runtimeClassLister struct { + indexer cache.Indexer +} + +// NewRuntimeClassLister returns a new RuntimeClassLister. +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { + return &runtimeClassLister{indexer: indexer} +} + +// List lists all RuntimeClasses in the indexer. +func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RuntimeClass)) + }) + return ret, err +} + +// Get retrieves the RuntimeClass from the index for a given name. +func (s *runtimeClassLister) Get(name string) (*v1alpha1.RuntimeClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("runtimeclass"), name) + } + return obj.(*v1alpha1.RuntimeClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go new file mode 100644 index 00000000..a6744055 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// RuntimeClassListerExpansion allows custom methods to be added to +// RuntimeClassLister. +type RuntimeClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go new file mode 100644 index 00000000..be642b99 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/node/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RuntimeClassLister helps list RuntimeClasses. +type RuntimeClassLister interface { + // List lists all RuntimeClasses in the indexer. + List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) + // Get retrieves the RuntimeClass from the index for a given name. + Get(name string) (*v1beta1.RuntimeClass, error) + RuntimeClassListerExpansion +} + +// runtimeClassLister implements the RuntimeClassLister interface. 
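Listers like the v1alpha1 one completed above are read-only views over an informer's local cache, keyed by cache.MetaNamespaceKeyFunc, which reduces to the bare object name for cluster-scoped resources such as RuntimeClass. A standalone sketch that fills an indexer by hand instead of running an informer (the `kata` name is illustrative):

```go
package main

import (
	"fmt"

	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	listers "k8s.io/client-go/listers/node/v1alpha1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// In real controllers the indexer belongs to a shared informer; filling a
	// standalone store here just demonstrates the lister's read path.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&nodev1alpha1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"},
	})

	lister := listers.NewRuntimeClassLister(indexer)

	// Cluster-scoped resource: Get takes a bare name, matching the
	// GetByKey(name) lookup in the generated code above.
	rc, err := lister.Get("kata")
	if err != nil {
		panic(err)
	}
	fmt.Println("found:", rc.Name)

	all, _ := lister.List(labels.Everything())
	fmt.Println("cached:", len(all))
}
```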
+type runtimeClassLister struct { + indexer cache.Indexer +} + +// NewRuntimeClassLister returns a new RuntimeClassLister. +func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { + return &runtimeClassLister{indexer: indexer} +} + +// List lists all RuntimeClasses in the indexer. +func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.RuntimeClass)) + }) + return ret, err +} + +// Get retrieves the RuntimeClass from the index for a given name. +func (s *runtimeClassLister) Get(name string) (*v1beta1.RuntimeClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("runtimeclass"), name) + } + return obj.(*v1beta1.RuntimeClass), nil +} diff --git a/vendor/k8s.io/code-generator/Godeps/Godeps.json b/vendor/k8s.io/code-generator/Godeps/Godeps.json index 4e5beb40..cc209607 100644 --- a/vendor/k8s.io/code-generator/Godeps/Godeps.json +++ b/vendor/k8s.io/code-generator/Godeps/Godeps.json @@ -154,6 +154,86 @@ "ImportPath": "golang.org/x/tools/internal/semver", "Rev": "7f7074d5bcfd282eb16bc382b0bb3da762461985" }, + { + "ImportPath": "gonum.org/v1/gonum/blas", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/blas/blas64", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/blas/gonum", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/floats", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph/internal/linear", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph/internal/ordered", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph/internal/set", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph/internal/uid", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph/simple", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph/topo", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/graph/traverse", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/internal/asm/c128", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/internal/asm/f32", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/internal/asm/f64", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/internal/math32", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/lapack", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/lapack/gonum", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/lapack/lapack64", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, + { + "ImportPath": "gonum.org/v1/gonum/mat", + "Rev": "cebdade430ccb61c1feba4878085f6cf8cb3320e" + }, { 
"ImportPath": "k8s.io/gengo/args", "Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95" diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go index 55507322..e85ceb8d 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -25,16 +25,20 @@ import ( "os" "os/exec" "path/filepath" + "sort" "strings" + flag "github.com/spf13/pflag" + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/simple" + "gonum.org/v1/gonum/graph/topo" + "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/parser" "k8s.io/gengo/types" - - flag "github.com/spf13/pflag" ) type Generator struct { @@ -202,6 +206,18 @@ func Run(g *Generator) { c.Verify = g.Common.VerifyOnly c.FileTypes["protoidl"] = NewProtoFile() + // order package by imports, importees first + deps := deps(c, protobufNames.packages) + order, err := importOrder(deps) + if err != nil { + log.Fatalf("Failed to order packages by imports: %v", err) + } + topologicalPos := map[string]int{} + for i, p := range order { + topologicalPos[p] = i + } + sort.Sort(positionOrder{topologicalPos, protobufNames.packages}) + var vendoredOutputPackages, localOutputPackages generator.Packages for _, p := range protobufNames.packages { if _, ok := nonOutputPackages[p.Name()]; ok { @@ -347,3 +363,66 @@ func Run(g *Generator) { } } } + +func deps(c *generator.Context, pkgs []*protobufPackage) map[string][]string { + ret := map[string][]string{} + for _, p := range pkgs { + for _, d := range c.Universe[p.PackagePath].Imports { + ret[p.PackagePath] = append(ret[p.PackagePath], d.Path) + } + } + return ret +} + +func importOrder(deps map[string][]string) ([]string, error) { + nodes := map[string]graph.Node{} + names := map[int64]string{} + g := simple.NewDirectedGraph() + for pkg, imports := range deps { + for _, imp := range imports { + if _, found := nodes[pkg]; !found { + n := g.NewNode() + g.AddNode(n) + nodes[pkg] = n + names[n.ID()] = pkg + } + if _, found := nodes[imp]; !found { + n := g.NewNode() + g.AddNode(n) + nodes[imp] = n + names[n.ID()] = imp + } + g.SetEdge(g.NewEdge(nodes[imp], nodes[pkg])) + } + } + + ret := []string{} + sorted, err := topo.Sort(g) + if err != nil { + return nil, err + } + for _, n := range sorted { + ret = append(ret, names[n.ID()]) + fmt.Println("topological order", names[n.ID()]) + } + return ret, nil +} + +type positionOrder struct { + pos map[string]int + elements []*protobufPackage +} + +func (o positionOrder) Len() int { + return len(o.elements) +} + +func (o positionOrder) Less(i, j int) bool { + return o.pos[o.elements[i].PackagePath] < o.pos[o.elements[j].PackagePath] +} + +func (o positionOrder) Swap(i, j int) { + x := o.elements[i] + o.elements[i] = o.elements[j] + o.elements[j] = x +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/AUTHORS new file mode 100644 index 00000000..dd17494a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/AUTHORS @@ -0,0 +1,70 @@ +# This is the official list of gonum authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. 
+ +Brendan Tracey +Bill Gray +Bill Noon +Chad Kunde +Chih-Wei Chang +Chris Tessum +Dan Kortschak +Daniel Fireman +David Samborski +Davor Kapsa +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Google Inc +Gustaf Johansson +Iakov Davydov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeff Juozapaitis +Jeremy Atkinson +Jonas Kahler +Jonathan J Lawlor +Jonathan Schroeder +Joseph Watson +Josh Wilson +Julien Roland +Kent English +Kevin C. Zimmerman +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Matthieu Di Mercurio +Max Halford +MinJae Kwon +Or Rikon +Pontus Melke +Renée French +Robin Eklind +Samuel Kelemen +Sam Zaydel +Scott Holden +Sebastien Binet +source{d} +Shawn Smith +Spencer Lyon +Steve McCoy +Takeshi Yoneda +The University of Adelaide +The University of Minnesota +The University of Washington +Tobin Harding +Vladimír Chalupecký +Yevgeniy Vahlis diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/CONTRIBUTORS new file mode 100644 index 00000000..007d13b7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/CONTRIBUTORS @@ -0,0 +1,73 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the gonum +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees would be listed here +# but not in AUTHORS, because Google would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Andrew Brampton +Brendan Tracey +Bill Gray +Bill Noon +Chad Kunde +Chih-Wei Chang +Chris Tessum +Dan Kortschak +Daniel Fireman +David Samborski +Davor Kapsa +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Gustaf Johansson +Iakov Davydov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeff Juozapaitis +Jeremy Atkinson +Jonas Kahler +Jonathan J Lawlor +Jonathan Schroeder +Joseph Watson +Josh Wilson +Julien Roland +Kent English +Kevin C. Zimmerman +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Matthieu Di Mercurio +Max Halford +MinJae Kwon +Or Rikon +Pontus Melke +Renée French +Robin Eklind +Samuel Kelemen +Sam Zaydel +Scott Holden +Sebastien Binet +Shawn Smith +Spencer Lyon +Steve McCoy +Takeshi Yoneda +Tobin Harding +Vladimír Chalupecký +Yevgeniy Vahlis diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/LICENSE b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/LICENSE new file mode 100644 index 00000000..5f1c3f9c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/LICENSE @@ -0,0 +1,23 @@ +Copyright ©2013 The Gonum Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/README.md b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/README.md new file mode 100644 index 00000000..e9d33eee --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/README.md @@ -0,0 +1,47 @@ +# Gonum BLAS [![GoDoc](https://godoc.org/gonum.org/v1/gonum/blas?status.svg)](https://godoc.org/gonum.org/v1/gonum/blas) + +A collection of packages to provide BLAS functionality for the [Go programming +language](http://golang.org) + +## Installation +```sh + go get gonum.org/v1/gonum/blas/... +``` + +## Packages + +### blas + +Defines [BLAS API](http://www.netlib.org/blas/blast-forum/cinterface.pdf) split in several +interfaces. + +### blas/gonum + +Go implementation of the BLAS API (incomplete, implements the `float32` and `float64` API). + +### blas/blas64 and blas/blas32 + +Wrappers for an implementation of the double (i.e., `float64`) and single (`float32`) +precision real parts of the BLAS API. + +```Go +package main + +import ( + "fmt" + + "gonum.org/v1/gonum/blas/blas64" +) + +func main() { + v := blas64.Vector{Inc: 1, Data: []float64{1, 1, 1}} + fmt.Println("v has length:", blas64.Nrm2(len(v.Data), v)) +} +``` + +### blas/cblas128 and blas/cblas64 + +Wrappers for an implementation of the double (i.e., `complex128`) and single (`complex64`) +precision complex parts of the blas API. + +Currently blas/cblas64 and blas/cblas128 require gonum.org/v1/netlib/blas. diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas.go new file mode 100644 index 00000000..ec61c456 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas.go @@ -0,0 +1,287 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate ./conversions.bash + +package blas + +// Flag constants indicate Givens transformation H matrix state. +type Flag int + +const ( + Identity Flag = -2 // H is the identity matrix; no rotation is needed. + Rescaling Flag = -1 // H specifies rescaling. + OffDiagonal Flag = 0 // Off-diagonal elements of H are non-unit. 
+ Diagonal Flag = 1 // Diagonal elements of H are non-unit. +) + +// SrotmParams contains Givens transformation parameters returned +// by the Float32 Srotm method. +type SrotmParams struct { + Flag + H [4]float32 // Column-major 2 by 2 matrix. +} + +// DrotmParams contains Givens transformation parameters returned +// by the Float64 Drotm method. +type DrotmParams struct { + Flag + H [4]float64 // Column-major 2 by 2 matrix. +} + +// Transpose is used to specify the transposition operation for a +// routine. +type Transpose int + +const ( + NoTrans Transpose = 111 + iota + Trans + ConjTrans +) + +// Uplo is used to specify whether the matrix is an upper or lower +// triangular matrix. +type Uplo int + +const ( + All Uplo = 120 + iota + Upper + Lower +) + +// Diag is used to specify whether the matrix is a unit or non-unit +// triangular matrix. +type Diag int + +const ( + NonUnit Diag = 131 + iota + Unit +) + +// Side is used to specify from which side a multiplication operation +// is performed. +type Side int + +const ( + Left Side = 141 + iota + Right +) + +// Float32 implements the single precision real BLAS routines. +type Float32 interface { + Float32Level1 + Float32Level2 + Float32Level3 +} + +// Float32Level1 implements the single precision real BLAS Level 1 routines. +type Float32Level1 interface { + Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 + Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 + Sdot(n int, x []float32, incX int, y []float32, incY int) float32 + Snrm2(n int, x []float32, incX int) float32 + Sasum(n int, x []float32, incX int) float32 + Isamax(n int, x []float32, incX int) int + Sswap(n int, x []float32, incX int, y []float32, incY int) + Scopy(n int, x []float32, incX int, y []float32, incY int) + Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) + Srotg(a, b float32) (c, s, r, z float32) + Srotmg(d1, d2, b1, b2 float32) (p SrotmParams, rd1, rd2, rb1 float32) + Srot(n int, x []float32, incX int, y []float32, incY int, c, s float32) + Srotm(n int, x []float32, incX int, y []float32, incY int, p SrotmParams) + Sscal(n int, alpha float32, x []float32, incX int) +} + +// Float32Level2 implements the single precision real BLAS Level 2 routines. 
+type Float32Level2 interface { + Sgemv(tA Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Sgbmv(tA Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Strmv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) + Stbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) + Stpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) + Strsv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) + Stbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) + Stpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) + Ssymv(ul Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Ssbmv(ul Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Sspmv(ul Uplo, n int, alpha float32, ap []float32, x []float32, incX int, beta float32, y []float32, incY int) + Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) + Ssyr(ul Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) + Sspr(ul Uplo, n int, alpha float32, x []float32, incX int, ap []float32) + Ssyr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) + Sspr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32) +} + +// Float32Level3 implements the single precision real BLAS Level 3 routines. +type Float32Level3 interface { + Sgemm(tA, tB Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Ssymm(s Side, ul Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Ssyrk(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) + Ssyr2k(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Strmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) + Strsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) +} + +// Float64 implements the double precision real BLAS routines. +type Float64 interface { + Float64Level1 + Float64Level2 + Float64Level3 +} + +// Float64Level1 implements the double precision real BLAS Level 1 routines. 
+type Float64Level1 interface { + Ddot(n int, x []float64, incX int, y []float64, incY int) float64 + Dnrm2(n int, x []float64, incX int) float64 + Dasum(n int, x []float64, incX int) float64 + Idamax(n int, x []float64, incX int) int + Dswap(n int, x []float64, incX int, y []float64, incY int) + Dcopy(n int, x []float64, incX int, y []float64, incY int) + Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) + Drotg(a, b float64) (c, s, r, z float64) + Drotmg(d1, d2, b1, b2 float64) (p DrotmParams, rd1, rd2, rb1 float64) + Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) + Drotm(n int, x []float64, incX int, y []float64, incY int, p DrotmParams) + Dscal(n int, alpha float64, x []float64, incX int) +} + +// Float64Level2 implements the double precision real BLAS Level 2 routines. +type Float64Level2 interface { + Dgemv(tA Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dgbmv(tA Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dtrmv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) + Dtbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) + Dtpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) + Dtrsv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) + Dtbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) + Dtpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) + Dsymv(ul Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dsbmv(ul Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dspmv(ul Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) + Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) + Dsyr(ul Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) + Dspr(ul Uplo, n int, alpha float64, x []float64, incX int, ap []float64) + Dsyr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) + Dspr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64) +} + +// Float64Level3 implements the double precision real BLAS Level 3 routines. +type Float64Level3 interface { + Dgemm(tA, tB Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dsymm(s Side, ul Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dsyrk(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) + Dsyr2k(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dtrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) + Dtrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) +} + +// Complex64 implements the single precision complex BLAS routines. 
+type Complex64 interface { + Complex64Level1 + Complex64Level2 + Complex64Level3 +} + +// Complex64Level1 implements the single precision complex BLAS Level 1 routines. +type Complex64Level1 interface { + Cdotu(n int, x []complex64, incX int, y []complex64, incY int) (dotu complex64) + Cdotc(n int, x []complex64, incX int, y []complex64, incY int) (dotc complex64) + Scnrm2(n int, x []complex64, incX int) float32 + Scasum(n int, x []complex64, incX int) float32 + Icamax(n int, x []complex64, incX int) int + Cswap(n int, x []complex64, incX int, y []complex64, incY int) + Ccopy(n int, x []complex64, incX int, y []complex64, incY int) + Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) + Cscal(n int, alpha complex64, x []complex64, incX int) + Csscal(n int, alpha float32, x []complex64, incX int) +} + +// Complex64Level2 implements the single precision complex BLAS Level 2 routines. +type Complex64Level2 interface { + Cgemv(tA Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Cgbmv(tA Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Ctrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) + Ctbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) + Ctpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) + Ctrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) + Ctbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) + Ctpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) + Chemv(ul Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Chbmv(ul Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Chpmv(ul Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) + Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Cher(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) + Chpr(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64) + Cher2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Chpr2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) +} + +// Complex64Level3 implements the single precision complex BLAS Level 3 routines. 
+type Complex64Level3 interface { + Cgemm(tA, tB Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Csymm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Csyrk(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) + Csyr2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Ctrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) + Ctrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) + Chemm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Cherk(ul Uplo, t Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) + Cher2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) +} + +// Complex128 implements the double precision complex BLAS routines. +type Complex128 interface { + Complex128Level1 + Complex128Level2 + Complex128Level3 +} + +// Complex128Level1 implements the double precision complex BLAS Level 1 routines. +type Complex128Level1 interface { + Zdotu(n int, x []complex128, incX int, y []complex128, incY int) (dotu complex128) + Zdotc(n int, x []complex128, incX int, y []complex128, incY int) (dotc complex128) + Dznrm2(n int, x []complex128, incX int) float64 + Dzasum(n int, x []complex128, incX int) float64 + Izamax(n int, x []complex128, incX int) int + Zswap(n int, x []complex128, incX int, y []complex128, incY int) + Zcopy(n int, x []complex128, incX int, y []complex128, incY int) + Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) + Zscal(n int, alpha complex128, x []complex128, incX int) + Zdscal(n int, alpha float64, x []complex128, incX int) +} + +// Complex128Level2 implements the double precision complex BLAS Level 2 routines. 
+type Complex128Level2 interface { + Zgemv(tA Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zgbmv(tA Transpose, m, n int, kL int, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Ztrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) + Ztbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) + Ztpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) + Ztrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) + Ztbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) + Ztpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) + Zhemv(ul Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zhbmv(ul Uplo, n, k int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zhpmv(ul Uplo, n int, alpha complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zher(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) + Zhpr(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128) + Zher2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zhpr2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) +} + +// Complex128Level3 implements the double precision complex BLAS Level 3 routines. 
+type Complex128Level3 interface { + Zgemm(tA, tB Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zsymm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zsyrk(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) + Zsyr2k(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Ztrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) + Ztrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) + Zhemm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zherk(ul Uplo, t Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) + Zher2k(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go new file mode 100644 index 00000000..11dfaafb --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go @@ -0,0 +1,445 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/gonum" +) + +var blas64 blas.Float64 = gonum.Implementation{} + +// Use sets the BLAS float64 implementation to be used by subsequent BLAS calls. +// The default implementation is gonum.Implementation. +func Use(b blas.Float64) { + blas64 = b +} + +// Implementation returns the current BLAS float64 implementation. +// +// Implementation allows direct calls to the current BLAS float64 implementation +// giving finer control of parameters. +func Implementation() blas.Float64 { + return blas64 +} + +// Vector represents a vector with an associated element increment. +type Vector struct { + Inc int + Data []float64 +} + +// General represents a matrix using the conventional storage scheme. +type General struct { + Rows, Cols int + Stride int + Data []float64 +} + +// Band represents a band matrix using the band storage scheme. +type Band struct { + Rows, Cols int + KL, KU int + Stride int + Data []float64 +} + +// Triangular represents a triangular matrix using the conventional storage scheme. +type Triangular struct { + N int + Stride int + Data []float64 + Uplo blas.Uplo + Diag blas.Diag +} + +// TriangularBand represents a triangular matrix using the band storage scheme. +type TriangularBand struct { + N, K int + Stride int + Data []float64 + Uplo blas.Uplo + Diag blas.Diag +} + +// TriangularPacked represents a triangular matrix using the packed storage scheme. +type TriangularPacked struct { + N int + Data []float64 + Uplo blas.Uplo + Diag blas.Diag +} + +// Symmetric represents a symmetric matrix using the conventional storage scheme. 
+type Symmetric struct { + N int + Stride int + Data []float64 + Uplo blas.Uplo +} + +// SymmetricBand represents a symmetric matrix using the band storage scheme. +type SymmetricBand struct { + N, K int + Stride int + Data []float64 + Uplo blas.Uplo +} + +// SymmetricPacked represents a symmetric matrix using the packed storage scheme. +type SymmetricPacked struct { + N int + Data []float64 + Uplo blas.Uplo +} + +// Level 1 + +const negInc = "blas64: negative vector increment" + +// Dot computes the dot product of the two vectors: +// \sum_i x[i]*y[i]. +func Dot(n int, x, y Vector) float64 { + return blas64.Ddot(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Nrm2 computes the Euclidean norm of the vector x: +// sqrt(\sum_i x[i]*x[i]). +// +// Nrm2 will panic if the vector increment is negative. +func Nrm2(n int, x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Dnrm2(n, x.Data, x.Inc) +} + +// Asum computes the sum of the absolute values of the elements of x: +// \sum_i |x[i]|. +// +// Asum will panic if the vector increment is negative. +func Asum(n int, x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Dasum(n, x.Data, x.Inc) +} + +// Iamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Iamax returns -1 if n == 0. +// +// Iamax will panic if the vector increment is negative. +func Iamax(n int, x Vector) int { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Idamax(n, x.Data, x.Inc) +} + +// Swap exchanges the elements of the two vectors: +// x[i], y[i] = y[i], x[i] for all i. +func Swap(n int, x, y Vector) { + blas64.Dswap(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Copy copies the elements of x into the elements of y: +// y[i] = x[i] for all i. +func Copy(n int, x, y Vector) { + blas64.Dcopy(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Axpy adds x scaled by alpha to y: +// y[i] += alpha*x[i] for all i. +func Axpy(n int, alpha float64, x, y Vector) { + blas64.Daxpy(n, alpha, x.Data, x.Inc, y.Data, y.Inc) +} + +// Rotg computes the parameters of a Givens plane rotation so that +// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ +// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ +// where a and b are the Cartesian coordinates of a given point. +// c, s, and r are defined as +// r = ±Sqrt(a^2 + b^2), +// c = a/r, the cosine of the rotation angle, +// s = b/r, the sine of the rotation angle, +// and z is defined such that +// if |a| > |b|, z = s, +// otherwise if c != 0, z = 1/c, +// otherwise z = 1. +func Rotg(a, b float64) (c, s, r, z float64) { + return blas64.Drotg(a, b) +} + +// Rotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +func Rotmg(d1, d2, b1, b2 float64) (p blas.DrotmParams, rd1, rd2, rb1 float64) { + return blas64.Drotmg(d1, d2, b1, b2) +} + +// Rot applies a plane transformation to n points represented by the vectors x +// and y: +// x[i] = c*x[i] + s*y[i], +// y[i] = -s*x[i] + c*y[i], for all i. +func Rot(n int, x, y Vector, c, s float64) { + blas64.Drot(n, x.Data, x.Inc, y.Data, y.Inc, c, s) +} + +// Rotm applies the modified Givens rotation to n points represented by the +// vectors x and y. +func Rotm(n int, x, y Vector, p blas.DrotmParams) { + blas64.Drotm(n, x.Data, x.Inc, y.Data, y.Inc, p) +} + +// Scal scales the vector x by alpha: +// x[i] *= alpha for all i. +// +// Scal will panic if the vector increment is negative. 
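The Level 1 wrappers above take blas64.Vector values and forward to whichever blas.Float64 implementation is registered via Use (gonum.Implementation by default). A minimal sketch of the vector routines, using the Inc/Data layout this vintage of the package defines:

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/blas64"
)

func main() {
	x := blas64.Vector{Inc: 1, Data: []float64{1, 2, 3}}
	y := blas64.Vector{Inc: 1, Data: []float64{4, 5, 6}}

	fmt.Println(blas64.Dot(3, x, y)) // 1*4 + 2*5 + 3*6 = 32

	blas64.Axpy(3, 2, x, y) // y[i] += 2*x[i] -> [6 9 12]
	fmt.Println(y.Data)

	fmt.Println(blas64.Nrm2(3, x)) // sqrt(14)
}
```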
+func Scal(n int, alpha float64, x Vector) { + if x.Inc < 0 { + panic(negInc) + } + blas64.Dscal(n, alpha, x.Data, x.Inc) +} + +// Level 2 + +// Gemv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func Gemv(t blas.Transpose, alpha float64, a General, x Vector, beta float64, y Vector) { + blas64.Dgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Gbmv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, +// where A is an m×n band matrix, x and y are vectors, and alpha and beta are scalars. +func Gbmv(t blas.Transpose, alpha float64, a Band, x Vector, beta float64, y Vector) { + blas64.Dgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Trmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix, and x is a vector. +func Trmv(t blas.Transpose, a Triangular, x Vector) { + blas64.Dtrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular band matrix, and x is a vector. +func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { + blas64.Dtbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x is a vector. +func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { + blas64.Dtpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Trsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Trsv(t blas.Transpose, a Triangular, x Vector) { + blas64.Dtrsv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular band matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { + blas64.Dtbsv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x and b are +// vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
+func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { + blas64.Dtpsv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Symv computes +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +func Symv(alpha float64, a Symmetric, x Vector, beta float64, y Vector) { + blas64.Dsymv(a.Uplo, a.N, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Sbmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric band matrix, x and y are vectors, and alpha +// and beta are scalars. +func Sbmv(alpha float64, a SymmetricBand, x Vector, beta float64, y Vector) { + blas64.Dsbmv(a.Uplo, a.N, a.K, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Spmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +func Spmv(alpha float64, a SymmetricPacked, x Vector, beta float64, y Vector) { + blas64.Dspmv(a.Uplo, a.N, alpha, a.Data, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Ger performs a rank-1 update +// A += alpha * x * y^T, +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(alpha float64, x, y Vector, a General) { + blas64.Dger(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Syr performs a rank-1 update +// A += alpha * x * x^T, +// where A is an n×n symmetric matrix, x is a vector, and alpha is a scalar. +func Syr(alpha float64, x Vector, a Symmetric) { + blas64.Dsyr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) +} + +// Spr performs the rank-1 update +// A += alpha * x * x^T, +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +func Spr(alpha float64, x Vector, a SymmetricPacked) { + blas64.Dspr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data) +} + +// Syr2 performs a rank-2 update +// A += alpha * x * y^T + alpha * y * x^T, +// where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. +func Syr2(alpha float64, x, y Vector, a Symmetric) { + blas64.Dsyr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Spr2 performs a rank-2 update +// A += alpha * x * y^T + alpha * y * x^T, +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func Spr2(alpha float64, x, y Vector, a SymmetricPacked) { + blas64.Dspr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data) +} + +// Level 3 + +// Gemm computes +// C = alpha * A * B + beta * C, +// where A, B, and C are dense matrices, and alpha and beta are scalars. +// tA and tB specify whether A or B are transposed. +func Gemm(tA, tB blas.Transpose, alpha float64, a, b General, beta float64, c General) { + var m, n, k int + if tA == blas.NoTrans { + m, k = a.Rows, a.Cols + } else { + m, k = a.Cols, a.Rows + } + if tB == blas.NoTrans { + n = b.Cols + } else { + n = b.Rows + } + blas64.Dgemm(tA, tB, m, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Symm performs +// C = alpha * A * B + beta * C, if s == blas.Left, +// C = alpha * B * A + beta * C, if s == blas.Right, +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and +// alpha and beta are scalars.
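+// When s == blas.Left, A is m×m; when s == blas.Right, A is n×n.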
+func Symm(s blas.Side, alpha float64, a Symmetric, b General, beta float64, c General) { + var m, n int + if s == blas.Left { + m, n = a.N, b.Cols + } else { + m, n = b.Rows, a.N + } + blas64.Dsymm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Syrk performs a symmetric rank-k update +// C = alpha * A * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, +// where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans and +// a k×n matrix otherwise, and alpha and beta are scalars. +func Syrk(t blas.Transpose, alpha float64, a General, beta float64, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + blas64.Dsyrk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) +} + +// Syr2k performs a symmetric rank-2k update +// C = alpha * A * B^T + alpha * B * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * B + alpha * B^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, +// where C is an n×n symmetric matrix, A and B are n×k matrices if t == NoTrans +// and k×n matrices otherwise, and alpha and beta are scalars. +func Syr2k(t blas.Transpose, alpha float64, a, b General, beta float64, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + blas64.Dsyr2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Trmm performs +// B = alpha * A * B, if tA == blas.NoTrans and s == blas.Left, +// B = alpha * A^T * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// B = alpha * B * A, if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * A^T, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is +// a scalar. +func Trmm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { + blas64.Dtrmm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} + +// Trsm solves +// A * X = alpha * B, if tA == blas.NoTrans and s == blas.Left, +// A^T * X = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// X * A = alpha * B, if tA == blas.NoTrans and s == blas.Right, +// X * A^T = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and +// alpha is a scalar. +// +// At entry to the function, X contains the values of B, and the result is +// stored in-place into X. +// +// No check is made that A is invertible. +func Trsm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { + blas64.Dtrsm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/conv.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/conv.go new file mode 100644 index 00000000..882fd8a7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/conv.go @@ -0,0 +1,277 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import "gonum.org/v1/gonum/blas" + +// GeneralCols represents a matrix using the conventional column-major storage scheme. 
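+// Element (i, j) of a GeneralCols is stored at Data[i+j*Stride], whereas the +// row-major General stores it at Data[i*Stride+j].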
+type GeneralCols General + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t GeneralCols) From(a General) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if len(t.Data) < (t.Cols-1)*t.Stride+t.Rows { + panic("blas64: short data slice") + } + for i := 0; i < a.Rows; i++ { + for j, v := range a.Data[i*a.Stride : i*a.Stride+a.Cols] { + t.Data[i+j*t.Stride] = v + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t General) From(a GeneralCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if len(t.Data) < (t.Rows-1)*t.Stride+t.Cols { + panic("blas64: short data slice") + } + for j := 0; j < a.Cols; j++ { + for i, v := range a.Data[j*a.Stride : j*a.Stride+a.Rows] { + t.Data[i*t.Stride+j] = v + } + } +} + +// TriangularCols represents a matrix using the conventional column-major storage scheme. +type TriangularCols Triangular + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. +func (t TriangularCols) From(a Triangular) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. +func (t Triangular) From(a TriangularCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// BandCols represents a matrix using the band column-major storage scheme. +type BandCols Band + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. 
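+// Element (i, j) of the band moves from row-major position Data[j+KL-i+i*Stride] +// in a to column-major position Data[i+KU-j+j*Stride] in the receiver.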
+func (t BandCols) From(a Band) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("blas64: short stride for destination") + } + for i := 0; i < a.Rows; i++ { + for j := max(0, i-a.KL); j < min(i+a.KU+1, a.Cols); j++ { + t.Data[i+t.KU-j+j*t.Stride] = a.Data[j+a.KL-i+i*a.Stride] + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. +func (t Band) From(a BandCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("blas64: short stride for destination") + } + for j := 0; j < a.Cols; j++ { + for i := max(0, j-a.KU); i < min(j+a.KL+1, a.Rows); i++ { + t.Data[j+t.KL-i+i*t.Stride] = a.Data[i+a.KU-j+j*a.Stride] + } + } +} + +// TriangularBandCols represents a triangular matrix using the band column-major storage scheme. +type TriangularBandCols TriangularBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t TriangularBandCols) From(a TriangularBand) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage.
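+// The conversion is delegated to the Band/BandCols conversions above by viewing +// the stored triangle as a one-sided band: blas.Upper sets KU = K, blas.Lower +// sets KL = K.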
+func (t TriangularBand) From(a TriangularBandCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go new file mode 100644 index 00000000..5146f1a1 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go @@ -0,0 +1,153 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import "gonum.org/v1/gonum/blas" + +// SymmetricCols represents a matrix using the conventional column-major storage scheme. +type SymmetricCols Symmetric + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t SymmetricCols) From(a Symmetric) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t Symmetric) From(a SymmetricCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// SymmetricBandCols represents a symmetric matrix using the band column-major storage scheme. +type SymmetricBandCols SymmetricBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
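+// Only the triangle indicated by Uplo is stored, so the conversion copies just +// that one-sided band and leaves the mirrored triangle implicit.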
+func (t SymmetricBandCols) From(a SymmetricBand) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t SymmetricBand) From(a SymmetricBandCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/doc.go new file mode 100644 index 00000000..04ce4643 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/blas64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blas64 provides a simple interface to the float64 BLAS API. +package blas64 diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/conversions.bash b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/conversions.bash new file mode 100755 index 00000000..d1c0ef0d --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/conversions.bash @@ -0,0 +1,159 @@ +#!/usr/bin/env bash + +# Copyright ©2017 The Gonum Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Generate code for blas32. 
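+# Each file below is produced by the same pipeline: gofmt -r rewrites every +# float64 expression to the target element type, and sed renames the package +# and, where needed, the math import.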
+echo Generating blas32/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > blas32/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ +\ +>> blas32/conv.go + +echo Generating blas32/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > blas32/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +\ +>> blas32/conv_test.go + +echo Generating blas32/conv_symmetric.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > blas32/conv_symmetric.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ +\ +>> blas32/conv_symmetric.go + +echo Generating blas32/conv_symmetric_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > blas32/conv_symmetric_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +\ +>> blas32/conv_symmetric_test.go + + +# Generate code for cblas128. +echo Generating cblas128/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas128/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ +\ +>> cblas128/conv.go + +echo Generating cblas128/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas128/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_test.go + +echo Generating cblas128/conv_symmetric.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas128/conv_symmetric.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ +\ +>> cblas128/conv_symmetric.go + +echo Generating cblas128/conv_symmetric_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas128/conv_symmetric_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_symmetric_test.go + +echo Generating cblas128/conv_hermitian.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas128/conv_hermitian.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ +\ +>> cblas128/conv_hermitian.go + +echo Generating cblas128/conv_hermitian_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas128/conv_hermitian_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_hermitian_test.go + + +# Generate code for cblas64.
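+# cblas64 repeats the cblas128 pattern with complex64 elements; its tests swap +# in the internal cmplx64 package in place of the standard math library.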
+echo Generating cblas64/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas64/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ +\ +>> cblas64/conv.go + +echo Generating cblas64/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas64/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/cmplx64"_' \ +\ +>> cblas64/conv_test.go + +echo Generating cblas64/conv_hermitian.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas64/conv_hermitian.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ +\ +>> cblas64/conv_hermitian.go + +echo Generating cblas64/conv_hermitian_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas"; DO NOT EDIT.\n' > cblas64/conv_hermitian_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/cmplx64"_' \ +\ +>> cblas64/conv_hermitian_test.go diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/doc.go new file mode 100644 index 00000000..1e447c9b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/doc.go @@ -0,0 +1,108 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package blas provides interfaces for the BLAS linear algebra standard. + +All methods must perform appropriate parameter checking and panic if +provided parameters that do not conform to the requirements specified +by the BLAS standard. + +Quick Reference Guide to the BLAS from http://www.netlib.org/lapack/lug/node145.html + +This version is modified to remove the "order" option. All matrix operations are +on row-order matrices.
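+ +The prefix of each routine in the reference below encodes its element type; +the mapping is summarized under "Meaning of prefixes".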
+ +Level 1 BLAS + + dim scalar vector vector scalars 5-element prefixes + struct + + _rotg ( a, b ) S, D + _rotmg( d1, d2, a, b ) S, D + _rot ( n, x, incX, y, incY, c, s ) S, D + _rotm ( n, x, incX, y, incY, param ) S, D + _swap ( n, x, incX, y, incY ) S, D, C, Z + _scal ( n, alpha, x, incX ) S, D, C, Z, Cs, Zd + _copy ( n, x, incX, y, incY ) S, D, C, Z + _axpy ( n, alpha, x, incX, y, incY ) S, D, C, Z + _dot ( n, x, incX, y, incY ) S, D, Ds + _dotu ( n, x, incX, y, incY ) C, Z + _dotc ( n, x, incX, y, incY ) C, Z + __dot ( n, alpha, x, incX, y, incY ) Sds + _nrm2 ( n, x, incX ) S, D, Sc, Dz + _asum ( n, x, incX ) S, D, Sc, Dz + I_amax( n, x, incX ) s, d, c, z + +Level 2 BLAS + + options dim b-width scalar matrix vector scalar vector prefixes + + _gemv ( trans, m, n, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z + _gbmv ( trans, m, n, kL, kU, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z + _hemv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) C, Z + _hbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) C, Z + _hpmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) C, Z + _symv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) S, D + _sbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) S, D + _spmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) S, D + _trmv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z + _tbmv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z + _tpmv ( uplo, trans, diag, n, ap, x, incX ) S, D, C, Z + _trsv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z + _tbsv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z + _tpsv ( uplo, trans, diag, n, ap, x, incX ) S, D, C, Z + + options dim scalar vector vector matrix prefixes + + _ger ( m, n, alpha, x, incX, y, incY, a, lda ) S, D + _geru ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z + _gerc ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z + _her ( uplo, n, alpha, x, incX, a, lda ) C, Z + _hpr ( uplo, n, alpha, x, incX, ap ) C, Z + _her2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) C, Z + _hpr2 ( uplo, n, alpha, x, incX, y, incY, ap ) C, Z + _syr ( uplo, n, alpha, x, incX, a, lda ) S, D + _spr ( uplo, n, alpha, x, incX, ap ) S, D + _syr2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) S, D + _spr2 ( uplo, n, alpha, x, incX, y, incY, ap ) S, D + +Level 3 BLAS + + options dim scalar matrix matrix scalar matrix prefixes + + _gemm ( transA, transB, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _symm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _hemm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z + _syrk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) S, D, C, Z + _herk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) C, Z + _syr2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _her2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z + _trmm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z + _trsm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z + +Meaning of prefixes + + S - float32 C - complex64 + D - float64 Z - complex128 + +Matrix types + + GE - GEneral GB - General Band + SY - SYmmetric SB - Symmetric Band SP - Symmetric Packed + HE - HErmitian HB - Hermitian Band HP - Hermitian Packed + TR - TRiangular TB - Triangular Band TP - Triangular Packed + +Options + + trans = NoTrans, Trans, ConjTrans + uplo = Upper, Lower + diag = Nonunit, Unit + side = Left, Right (A or op(A) on the left, or A or op(A) on the right) + +For real matrices, Trans and ConjTrans 
have the same meaning. +For Hermitian matrices, trans = Trans is not allowed. +For complex symmetric matrices, trans = ConjTrans is not allowed. +*/ +package blas diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/cmplx.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/cmplx.go new file mode 100644 index 00000000..53180913 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/cmplx.go @@ -0,0 +1,164 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +var ( + _ blas.Complex64 = Implementation{} + _ blas.Complex128 = Implementation{} +) + +// TODO(btracey): Replace this as complex routines are added, and instead +// automatically generate the complex64 routines from the complex128 ones. + +var noComplex = "native: implementation does not implement this routine, see the cgo wrapper in gonum.org/v1/netlib/blas" + +// Level 1 complex64 routines. + +func (Implementation) Cdotu(n int, x []complex64, incX int, y []complex64, incY int) (dotu complex64) { + panic(noComplex) +} +func (Implementation) Cdotc(n int, x []complex64, incX int, y []complex64, incY int) (dotc complex64) { + panic(noComplex) +} +func (Implementation) Scnrm2(n int, x []complex64, incX int) float32 { + panic(noComplex) +} +func (Implementation) Scasum(n int, x []complex64, incX int) float32 { + panic(noComplex) +} +func (Implementation) Icamax(n int, x []complex64, incX int) int { + panic(noComplex) +} +func (Implementation) Cswap(n int, x []complex64, incX int, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Ccopy(n int, x []complex64, incX int, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Cscal(n int, alpha complex64, x []complex64, incX int) { + panic(noComplex) +} +func (Implementation) Csscal(n int, alpha float32, x []complex64, incX int) { + panic(noComplex) +} + +// Level 2 complex64 routines. 
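+// These stubs satisfy the blas.Complex64 interface; each panics with +// noComplex when called.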
+ +func (Implementation) Cgemv(tA blas.Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Cgbmv(tA blas.Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Ctrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { + panic(noComplex) +} +func (Implementation) Ctbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { + panic(noComplex) +} +func (Implementation) Ctpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []complex64, x []complex64, incX int) { + panic(noComplex) +} +func (Implementation) Ctrsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { + panic(noComplex) +} +func (Implementation) Ctbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { + panic(noComplex) +} +func (Implementation) Ctpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []complex64, x []complex64, incX int) { + panic(noComplex) +} +func (Implementation) Chemv(ul blas.Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Chbmv(ul blas.Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Chpmv(ul blas.Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) { + panic(noComplex) +} +func (Implementation) Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + panic(noComplex) +} +func (Implementation) Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + panic(noComplex) +} +func (Implementation) Cher(ul blas.Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) { + panic(noComplex) +} +func (Implementation) Chpr(ul blas.Uplo, n int, alpha float32, x []complex64, incX int, a []complex64) { + panic(noComplex) +} +func (Implementation) Cher2(ul blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + panic(noComplex) +} +func (Implementation) Chpr2(ul blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) { + panic(noComplex) +} + +// Level 3 complex64 routines. 
+ +func (Implementation) Cgemm(tA, tB blas.Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + panic(noComplex) +} +func (Implementation) Csymm(s blas.Side, ul blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + panic(noComplex) +} +func (Implementation) Csyrk(ul blas.Uplo, t blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) { + panic(noComplex) +} +func (Implementation) Csyr2k(ul blas.Uplo, t blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + panic(noComplex) +} +func (Implementation) Ctrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) { + panic(noComplex) +} +func (Implementation) Ctrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) { + panic(noComplex) +} +func (Implementation) Chemm(s blas.Side, ul blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + panic(noComplex) +} +func (Implementation) Cherk(ul blas.Uplo, t blas.Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) { + panic(noComplex) +} +func (Implementation) Cher2k(ul blas.Uplo, t blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) { + panic(noComplex) +} + +// Level 3 complex128 routines. + +func (Implementation) Zgemm(tA, tB blas.Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + panic(noComplex) +} +func (Implementation) Zsymm(s blas.Side, ul blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + panic(noComplex) +} +func (Implementation) Zsyrk(ul blas.Uplo, t blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) { + panic(noComplex) +} +func (Implementation) Zsyr2k(ul blas.Uplo, t blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + panic(noComplex) +} +func (Implementation) Ztrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { + panic(noComplex) +} +func (Implementation) Ztrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { + panic(noComplex) +} +func (Implementation) Zhemm(s blas.Side, ul blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + panic(noComplex) +} +func (Implementation) Zherk(ul blas.Uplo, t blas.Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) { + panic(noComplex) +} +func (Implementation) Zher2k(ul blas.Uplo, t blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) { + panic(noComplex) +} diff --git 
a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go new file mode 100644 index 00000000..4147aa88 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go @@ -0,0 +1,265 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "runtime" + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Dgemm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C +// C = alpha * A^T * B + beta * C +// C = alpha * A * B^T + beta * C +// C = alpha * A^T * B^T + beta * C +// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is +// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or +// B are transposed. +func (Implementation) Dgemm(tA, tB blas.Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if tB != blas.NoTrans && tB != blas.Trans && tB != blas.ConjTrans { + panic(badTranspose) + } + aTrans := tA == blas.Trans || tA == blas.ConjTrans + if aTrans { + checkDMatrix('a', k, m, a, lda) + } else { + checkDMatrix('a', m, k, a, lda) + } + bTrans := tB == blas.Trans || tB == blas.ConjTrans + if bTrans { + checkDMatrix('b', n, k, b, ldb) + } else { + checkDMatrix('b', k, n, b, ldb) + } + checkDMatrix('c', m, n, c, ldc) + + // scale c + if beta != 1 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + } + } + + dgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) +} + +func dgemmParallel(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // dgemmParallel computes a parallel matrix multiplication by partitioning + // a and b into sub-blocks, and updating c with the multiplication of the sub-blocks. + // In all cases, + // A = [ A_11 A_12 ... A_1j + // A_21 A_22 ... A_2j + // ... + // A_i1 A_i2 ... A_ij] + // + // and same for B. All of the submatrix sizes are blockSize×blockSize except + // at the edges. + // + // In all cases, there is one dimension for each matrix along which + // C must be updated sequentially. + // Cij = \sum_k Aik Bkj, (A * B) + // Cij = \sum_k Aki Bkj, (A^T * B) + // Cij = \sum_k Aik Bjk, (A * B^T) + // Cij = \sum_k Aki Bjk, (A^T * B^T) + // + // This code computes one {i, j} block sequentially along the k dimension, + // and computes all of the {i, j} blocks concurrently. This + // partitioning allows Cij to be updated in-place without race conditions. + // Instead of launching a goroutine for each possible concurrent computation, + // a number of worker goroutines are created and channels are used to pass + // available and completed cases. + // + // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix + // multiplies, though this code does not copy matrices to attempt to eliminate + // cache misses.
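+ // + // For example, with blockSize = 64 a 1024×1024 product yields + // blocks(1024, 64)^2 = 256 {i, j} tiles, comfortably above minParBlock.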
+ + maxKLen := k + parBlocks := blocks(m, blockSize) * blocks(n, blockSize) + if parBlocks < minParBlock { + // The matrix multiplication is small in the dimensions where it can be + // computed concurrently. Just do it in serial. + dgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + } + + nWorkers := runtime.GOMAXPROCS(0) + if parBlocks < nWorkers { + nWorkers = parBlocks + } + // There is a tradeoff between the workers having to wait for work + // and a large buffer making operations slow. + buf := buffMul * nWorkers + if buf > parBlocks { + buf = parBlocks + } + + sendChan := make(chan subMul, buf) + + // Launch workers. A worker receives an {i, j} submatrix of c, and computes + // A_ik B_kj (or the transposed version), storing the result in c_ij. When the + // channel is finally closed, it signals to the waitgroup that it has finished + // computing. + var wg sync.WaitGroup + for i := 0; i < nWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // Make local copies of otherwise global variables to reduce shared memory. + // This has a noticeable effect on benchmarks in some cases. + alpha := alpha + aTrans := aTrans + bTrans := bTrans + m := m + n := n + for sub := range sendChan { + i := sub.i + j := sub.j + leni := blockSize + if i+leni > m { + leni = m - i + } + lenj := blockSize + if j+lenj > n { + lenj = n - j + } + + cSub := sliceView64(c, ldc, i, j, leni, lenj) + + // Compute A_ik B_kj for all k + for k := 0; k < maxKLen; k += blockSize { + lenk := blockSize + if k+lenk > maxKLen { + lenk = maxKLen - k + } + var aSub, bSub []float64 + if aTrans { + aSub = sliceView64(a, lda, k, i, lenk, leni) + } else { + aSub = sliceView64(a, lda, i, k, leni, lenk) + } + if bTrans { + bSub = sliceView64(b, ldb, j, k, lenj, lenk) + } else { + bSub = sliceView64(b, ldb, k, j, lenk, lenj) + } + dgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) + } + } + }() + } + + // Send out all of the {i, j} subblocks for computation. + for i := 0; i < m; i += blockSize { + for j := 0; j < n; j += blockSize { + sendChan <- subMul{ + i: i, + j: j, + } + } + } + close(sendChan) + wg.Wait() +} + +// dgemmSerial is a serial matrix multiply. +func dgemmSerial(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + switch { + case !aTrans && !bTrans: + dgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && !bTrans: + dgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case !aTrans && bTrans: + dgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && bTrans: + dgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + default: + panic("unreachable") + } +} + +// dgemmSerial where neither a nor b are transposed +func dgemmSerialNotNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride+j] indexing because it is + // approximately 5 times faster as of go 1.3.
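+ // Each pass accumulates alpha*a[i][l] times row l of b into row i of c + // via the f64.AxpyUnitaryTo kernel, skipping zero multipliers.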
+ for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for l, v := range a[i*lda : i*lda+k] { + tmp := alpha * v + if tmp != 0 { + f64.AxpyUnitaryTo(ctmp, tmp, b[l*ldb:l*ldb+n], ctmp) + } + } + } +} + +// dgemmSerial where a is transposed and b is not +func dgemmSerialTransNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride+j] indexing because it is + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + btmp := b[l*ldb : l*ldb+n] + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyUnitaryTo(ctmp, tmp, btmp, ctmp) + } + } + } +} + +// dgemmSerial where a is not transposed and b is +func dgemmSerialNotTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride+j] indexing because it is + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + atmp := a[i*lda : i*lda+k] + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] += alpha * f64.DotUnitary(atmp, b[j*ldb:j*ldb+k]) + } + } +} + +// dgemmSerial where both a and b are transposed +func dgemmSerialTransTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride+j] indexing because it is + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) + } + } + } +} + +func sliceView64(a []float64, lda, i, j, r, c int) []float64 { + return a[i*lda+j : (i+r-1)*lda+j+c] +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/doc.go new file mode 100644 index 00000000..b0e900e3 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/doc.go @@ -0,0 +1,88 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Ensure changes made to blas/native are reflected in blas/cgo where relevant. + +/* +Package gonum is a Go implementation of the BLAS API. This implementation +panics when the input arguments are invalid as per the standard, for example +if a vector increment is zero. Note that the treatment of NaN values +is not specified, and differs among the BLAS implementations. +gonum.org/v1/gonum/blas/blas64 provides helpful wrapper functions to the BLAS +interface. The rest of this text describes the layout of the data for the input types. + +Note that in the function documentation, x[i] refers to the i^th element +of the vector, which will be different from the i^th element of the slice if +incX != 1. + +See http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html +for more license information. + +Vector arguments are effectively strided slices. They have two input arguments, +a number of elements, n, and an increment, incX. The increment specifies the +distance between elements of the vector. The actual Go slice may be longer +than necessary. +The increment may be positive or negative, except in functions with only +a single vector argument where the increment may only be positive.
If the increment +is negative, s[0] is the last element in the slice. Note that this is not the same +as counting backward from the end of the slice, as len(s) may be longer than +necessary. So, for example, if n = 5 and incX = 3, the elements of s are + [0 * * 1 * * 2 * * 3 * * 4 * * * ...] +where ∗ elements are never accessed. If incX = -3, the same elements are +accessed, just in reverse order (4, 3, 2, 1, 0). + +Dense matrices are specified by a number of rows, a number of columns, and a stride. +The stride specifies the number of entries in the slice between the first element +of successive rows. The stride must be at least as large as the number of columns +but may be longer. + [a00 ... a0n a0* ... a1stride-1 a10 ... amn am* ... amstride-1] +Thus, dense[i*ld + j] refers to the {i, j}th element of the matrix. + +Symmetric and triangular matrices (non-packed) are stored identically to Dense, +except that only elements in one triangle of the matrix are accessed. + +Packed symmetric and packed triangular matrices are laid out with the entries +condensed such that all of the unreferenced elements are removed. So, the upper triangular +matrix + [ + 1 2 3 + 0 4 5 + 0 0 6 + ] +and the lower-triangular matrix + [ + 1 0 0 + 2 3 0 + 4 5 6 + ] +will both be compacted as [1 2 3 4 5 6]. The (i, j) element of the original +dense matrix can be found at element i*n - (i-1)*i/2 + j for upper triangular, +and at element i*(i+1)/2 + j for lower triangular. + +Banded matrices are laid out in a compact format, constructed by removing the +zeros in the rows and aligning the diagonals. For example, the matrix + [ + 1 2 3 0 0 0 + 4 5 6 7 0 0 + 0 8 9 10 11 0 + 0 0 12 13 14 15 + 0 0 0 16 17 18 + 0 0 0 0 19 20 + ] + +implicitly becomes (∗ entries are never accessed) + [ + * 1 2 3 + 4 5 6 7 + 8 9 10 11 + 12 13 14 15 + 16 17 18 * + 19 20 * * + ] +which is given to the BLAS routine as [∗ 1 2 3 4 ...]. + +See http://www.crest.iu.edu/research/mtl/reference/html/banded.html +for more information. +*/ +package gonum diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go new file mode 100644 index 00000000..acd8f323 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go @@ -0,0 +1,180 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// TODO(Kunde21): Merge these methods back into level2double/level2single when Sgemv assembly kernels are merged into f32. + +// Dgemv computes +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars.
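+// The vector x must have length n and y length m when tA == blas.NoTrans; +// the lengths are swapped otherwise.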
+func (Implementation) Dgemv(tA blas.Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(badY) + } + if lda*(m-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + + // Quick return if possible + if m == 0 || n == 0 || (alpha == 0 && beta == 1) { + return + } + + if alpha == 0 { + // First form y = beta * y + if incY > 0 { + Implementation{}.Dscal(lenY, beta, y, incY) + } else { + Implementation{}.Dscal(lenY, beta, y, -incY) + } + return + } + + // Form y = alpha * A * x + y + if tA == blas.NoTrans { + f64.GemvN(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) + return + } + // Cases where a is transposed. + f64.GemvT(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) +} + +// Sgemv computes +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sgemv(tA blas.Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(badY) + } + if lda*(m-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + + // Quick return if possible + if m == 0 || n == 0 || (alpha == 0 && beta == 1) { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // First form y = beta * y + if incY > 0 { + Implementation{}.Sscal(lenY, beta, y, incY) + } else { + Implementation{}.Sscal(lenY, beta, y, -incY) + } + + if alpha == 0 { + return + } + + // Form y = alpha * A * x + y + if tA == blas.NoTrans { + if incX == 1 && incY == 1 { + for i := 0; i < m; i++ { + y[i] += alpha * f32.DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + iy := ky + for i := 0; i < m; i++ { + y[iy] += alpha * f32.DotInc(x, a[lda*i:lda*i+n], uintptr(n), uintptr(incX), 1, uintptr(kx), 0) + iy += incY + } + return + } + // Cases where a is transposed. 
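+ // Here y accumulates alpha * x[i] times row i of a, so a is still traversed + // in row-major order for the transposed product.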
+ if incX == 1 && incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[i] + if tmp != 0 { + f32.AxpyUnitaryTo(y, tmp, a[lda*i:lda*i+n], y) + } + } + return + } + ix := kx + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + if tmp != 0 { + f32.AxpyInc(tmp, a[lda*i:lda*i+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + } + ix += incX + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go new file mode 100644 index 00000000..e18b69be --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go @@ -0,0 +1,182 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate ./single_precision.bash + +package gonum + +import "math" + +// Implementation provides the pure Go implementation of the BLAS routines in +// this package. +type Implementation struct{} + +// The following are panic strings used during parameter checks. +const ( + zeroIncX = "blas: zero x index increment" + zeroIncY = "blas: zero y index increment" + + mLT0 = "blas: m < 0" + nLT0 = "blas: n < 0" + kLT0 = "blas: k < 0" + kLLT0 = "blas: kL < 0" + kULT0 = "blas: kU < 0" + + badUplo = "blas: illegal triangle" + badTranspose = "blas: illegal transpose" + badDiag = "blas: illegal diagonal" + badSide = "blas: illegal side" + + badLdA = "blas: bad leading dimension of A" + badLdB = "blas: bad leading dimension of B" + badLdC = "blas: bad leading dimension of C" + + badX = "blas: bad length of x" + badY = "blas: bad length of y" +) + +// [SD]gemm behavior constants. These are kept here to keep them out of the +// way during single precision code generation. +const ( + blockSize = 64 // b x b matrix + minParBlock = 4 // minimum number of blocks needed to go parallel + buffMul = 4 // how big is the buffer relative to the number of workers +) + +// subMul is a common type shared by [SD]gemm.
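+// Each value names the top-left corner of one blockSize×blockSize tile of C +// owned by a single worker goroutine.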
+type subMul struct { + i, j int // index of block +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a > b { + return b + } + return a +} + +func checkSMatrix(name byte, m, n int, a []float32, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < n { + panic("blas: illegal stride of " + string(name)) + } + if len(a) < (m-1)*lda+n { + panic("blas: index of " + string(name) + " out of range") + } +} + +func checkDMatrix(name byte, m, n int, a []float64, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < n { + panic("blas: illegal stride of " + string(name)) + } + if len(a) < (m-1)*lda+n { + panic("blas: index of " + string(name) + " out of range") + } +} + +func checkZMatrix(name byte, m, n int, a []complex128, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic("blas: illegal stride of " + string(name)) + } + if len(a) < (m-1)*lda+n { + panic("blas: insufficient " + string(name) + " matrix slice length") + } +} + +func checkZBandMatrix(name byte, m, n, kL, kU int, ab []complex128, ldab int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if ldab < kL+kU+1 { + panic("blas: illegal stride of band matrix " + string(name)) + } + nRow := min(m, n+kL) + if len(ab) < (nRow-1)*ldab+kL+1+kU { + panic("blas: insufficient " + string(name) + " band matrix slice length") + } +} + +func checkZhbMatrix(name byte, n, k int, ab []complex128, ldab int) { + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if ldab < k+1 { + panic("blas: illegal stride of Hermitian band matrix " + string(name)) + } + if len(ab) < (n-1)*ldab+k+1 { + panic("blas: insufficient " + string(name) + " Hermitian band matrix slice length") + } +} + +func checkZtbMatrix(name byte, n, k int, ab []complex128, ldab int) { + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if ldab < k+1 { + panic("blas: illegal stride of triangular band matrix " + string(name)) + } + if len(ab) < (n-1)*ldab+k+1 { + panic("blas: insufficient " + string(name) + " triangular band matrix slice length") + } +} + +func checkZVector(name byte, n int, x []complex128, incX int) { + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic("blas: insufficient " + string(name) + " vector slice length") + } +} + +// blocks returns the number of divisions of the dimension length with the given +// block size. +func blocks(dim, bsize int) int { + return (dim + bsize - 1) / bsize +} + +// dcabs1 returns |real(z)|+|imag(z)|. +func dcabs1(z complex128) float64 { + return math.Abs(real(z)) + math.Abs(imag(z)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go new file mode 100644 index 00000000..d437b8c8 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go @@ -0,0 +1,442 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/internal/asm/c128" +) + +// Dzasum returns the sum of the absolute values of the elements of x +// \sum_i |Re(x[i])| + |Im(x[i])| +// Dzasum returns 0 if incX is negative. +func (Implementation) Dzasum(n int, x []complex128, incX int) float64 { + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + var sum float64 + if incX == 1 { + if len(x) < n { + panic(badX) + } + for _, v := range x[:n] { + sum += dcabs1(v) + } + return sum + } + if (n-1)*incX >= len(x) { + panic(badX) + } + for i := 0; i < n; i++ { + v := x[i*incX] + sum += dcabs1(v) + } + return sum +} + +// Dznrm2 computes the Euclidean norm of the complex vector x, +// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// This function returns 0 if incX is negative. +func (Implementation) Dznrm2(n int, x []complex128, incX int) float64 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if n < 1 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(badX) + } + var ( + scale float64 + ssq float64 = 1 + ) + if incX == 1 { + for _, v := range x[:n] { + re, im := math.Abs(real(v)), math.Abs(imag(v)) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) + } + for ix := 0; ix < n*incX; ix += incX { + re, im := math.Abs(real(x[ix])), math.Abs(imag(x[ix])) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) +} + +// Izamax returns the index of the first element of x having largest |Re(·)|+|Im(·)|. +// Izamax returns -1 if n is 0 or incX is negative. +func (Implementation) Izamax(n int, x []complex128, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + // Return invalid index. + return -1 + } + if n < 1 { + if n == 0 { + // Return invalid index. 
+ return -1 + } + panic(nLT0) + } + if len(x) <= (n-1)*incX { + panic(badX) + } + idx := 0 + max := dcabs1(x[0]) + if incX == 1 { + for i, v := range x[1:n] { + absV := dcabs1(v) + if absV > max { + max = absV + idx = i + 1 + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + absV := dcabs1(x[ix]) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Zaxpy adds alpha times x to y: +// y[i] += alpha * x[i] for all i +func (Implementation) Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + c128.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (1 - n) * incX + } + if incY < 0 { + iy = (1 - n) * incY + } + c128.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zcopy copies the vector x to vector y. +func (Implementation) Zcopy(n int, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Zdotc computes the dot product +// x^H · y +// of two complex vectors x and y. +func (Implementation) Zdotc(n int, x []complex128, incX int, y []complex128, incY int) complex128 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(badX) + } + if len(y) < n { + panic(badY) + } + return c128.DotcUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(badX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(badY) + } + return c128.DotcInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zdotu computes the dot product +// x^T · y +// of two complex vectors x and y. 
+func (Implementation) Zdotu(n int, x []complex128, incX int, y []complex128, incY int) complex128 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(badX) + } + if len(y) < n { + panic(badY) + } + return c128.DotuUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(badX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(badY) + } + return c128.DotuInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zdscal scales the vector x by a real scalar alpha. +// Zdscal has no effect if incX < 0. +func (Implementation) Zdscal(n int, alpha float64, x []complex128, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(badX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + x = x[:n] + for i, v := range x { + x[i] = complex(alpha*real(v), alpha*imag(v)) + } + return + } + for ix := 0; ix < n*incX; ix += incX { + v := x[ix] + x[ix] = complex(alpha*real(v), alpha*imag(v)) + } +} + +// Zscal scales the vector x by a complex scalar alpha. +// Zscal has no effect if incX < 0. +func (Implementation) Zscal(n int, alpha complex128, x []complex128, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(badX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + c128.ScalUnitary(alpha, x[:n]) + return + } + c128.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} + +// Zswap exchanges the elements of two complex vectors x and y. +func (Implementation) Zswap(n int, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1double.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1double.go new file mode 100644 index 00000000..84a6f2b0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1double.go @@ -0,0 +1,620 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
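The two complex dot products above differ only in conjugation: Zdotc forms x^H·y while Zdotu forms x^T·y. A hedged usage sketch, assuming the vendored package is importable at its upstream path gonum.org/v1/gonum/blas/gonum:

package main

import (
    "fmt"

    "gonum.org/v1/gonum/blas/gonum"
)

func main() {
    var impl gonum.Implementation
    x := []complex128{1 + 2i}
    y := []complex128{3 + 4i}
    // Zdotc conjugates x first: conj(1+2i)*(3+4i) = (1-2i)(3+4i) = 11 - 2i.
    fmt.Println(impl.Zdotc(1, x, 1, y, 1))
    // Zdotu does not: (1+2i)(3+4i) = -5 + 10i.
    fmt.Println(impl.Zdotu(1, x, 1, y, 1))
}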
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level1 = Implementation{} + +// Dnrm2 computes the Euclidean norm of a vector, +// sqrt(\sum_i x[i] * x[i]). +// This function returns 0 if incX is negative. +func (Implementation) Dnrm2(n int, x []float64, incX int) float64 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if incX > 0 && (n-1)*incX >= len(x) { + panic(badX) + } + if n < 2 { + if n == 1 { + return math.Abs(x[0]) + } + if n == 0 { + return 0 + } + panic(nLT0) + } + var ( + scale float64 = 0 + sumSquares float64 = 1 + ) + if incX == 1 { + x = x[:n] + for _, v := range x { + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) + } + for ix := 0; ix < n*incX; ix += incX { + val := x[ix] + if val == 0 { + continue + } + absxi := math.Abs(val) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// Dasum computes the sum of the absolute values of the elements of x. +// \sum_i |x[i]| +// Dasum returns 0 if incX is negative. +func (Implementation) Dasum(n int, x []float64, incX int) float64 { + var sum float64 + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if incX > 0 && (n-1)*incX >= len(x) { + panic(badX) + } + if incX == 1 { + x = x[:n] + for _, v := range x { + sum += math.Abs(v) + } + return sum + } + for i := 0; i < n; i++ { + sum += math.Abs(x[i*incX]) + } + return sum +} + +// Idamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Idamax returns -1 if n == 0. +func (Implementation) Idamax(n int, x []float64, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return -1 + } + if incX > 0 && (n-1)*incX >= len(x) { + panic(badX) + } + if n < 2 { + if n == 1 { + return 0 + } + if n == 0 { + return -1 // Netlib returns invalid index when n == 0 + } + panic(nLT0) + } + idx := 0 + max := math.Abs(x[0]) + if incX == 1 { + for i, v := range x[:n] { + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + v := x[ix] + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Dswap exchanges the elements of two vectors. 
+// x[i], y[i] = y[i], x[i] for all i +func (Implementation) Dswap(n int, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} + +// Dcopy copies the elements of x into the elements of y. +// y[i] = x[i] for all i +func (Implementation) Dcopy(n int, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Daxpy adds alpha times x to y +// y[i] += alpha * x[i] for all i +func (Implementation) Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + f64.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + f64.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Drotg computes the plane rotation +// _ _ _ _ _ _ +// | c s | | a | | r | +// | -s c | * | b | = | 0 | +// ‾ ‾ ‾ ‾ ‾ ‾ +// where +// r = ±√(a^2 + b^2) +// c = a/r, the cosine of the plane rotation +// s = b/r, the sine of the plane rotation +// +// NOTE: There is a discrepancy between the reference implementation and the BLAS +// technical manual regarding the sign for r when a or b are zero. +// Drotg agrees with the definition in the manual and other +// common BLAS implementations. +func (Implementation) Drotg(a, b float64) (c, s, r, z float64) { + if b == 0 && a == 0 { + return 1, 0, a, 0 + } + absA := math.Abs(a) + absB := math.Abs(b) + aGTb := absA > absB + r = math.Hypot(a, b) + if aGTb { + r = math.Copysign(r, a) + } else { + r = math.Copysign(r, b) + } + c = a / r + s = b / r + if aGTb { + z = s + } else if c != 0 { // r == 0 case handled above + z = 1 / c + } else { + z = 1 + } + return +} + +// Drotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details.
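For intuition, the Drotg implementation above returns c and s such that the rotation maps (a, b) to (r, 0). A small sketch checking that property, under the same assumed import path; the printed values are approximate in floating point:

package main

import (
    "fmt"

    "gonum.org/v1/gonum/blas/gonum"
)

func main() {
    var impl gonum.Implementation
    a, b := 3.0, 4.0
    c, s, r, _ := impl.Drotg(a, b)
    fmt.Println(r)          // 5, i.e. ±hypot(a, b)
    fmt.Println(c*a + s*b)  // ≈ 5: the rotated first component equals r
    fmt.Println(-s*a + c*b) // ≈ 0: the second component is annihilated
}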
+func (Implementation) Drotmg(d1, d2, x1, y1 float64) (p blas.DrotmParams, rd1, rd2, rx1 float64) { + // The implementation of Drotmg used here is taken from Hopkins 1997 + // Appendix A: https://doi.org/10.1145/289251.289253 + // with the exception of the gam constants below. + + const ( + gam = 4096.0 + gamsq = gam * gam + rgamsq = 1.0 / gamsq + ) + + if d1 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + if d2 == 0 || y1 == 0 { + p.Flag = blas.Identity + return p, d1, d2, x1 + } + + var h11, h12, h21, h22 float64 + if (d1 == 0 || x1 == 0) && d2 > 0 { + p.Flag = blas.Diagonal + h12 = 1 + h21 = -1 + x1 = y1 + d1, d2 = d2, d1 + } else { + p2 := d2 * y1 + p1 := d1 * x1 + q2 := p2 * y1 + q1 := p1 * x1 + if math.Abs(q1) > math.Abs(q2) { + p.Flag = blas.OffDiagonal + h11 = 1 + h22 = 1 + h21 = -y1 / x1 + h12 = p2 / p1 + u := 1 - h12*h21 + if u <= 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + d1 /= u + d2 /= u + x1 *= u + } else { + if q2 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + p.Flag = blas.Diagonal + h21 = -1 + h12 = 1 + h11 = p1 / p2 + h22 = x1 / y1 + u := 1 + h11*h22 + d1, d2 = d2/u, d1/u + x1 = y1 * u + } + } + + for d1 <= rgamsq && d1 != 0 { + p.Flag = blas.Rescaling + d1 = (d1 * gam) * gam + x1 /= gam + h11 /= gam + h12 /= gam + } + for d1 > gamsq { + p.Flag = blas.Rescaling + d1 = (d1 / gam) / gam + x1 *= gam + h11 *= gam + h12 *= gam + } + + for math.Abs(d2) <= rgamsq && d2 != 0 { + p.Flag = blas.Rescaling + d2 = (d2 * gam) * gam + h21 /= gam + h22 /= gam + } + for math.Abs(d2) > gamsq { + p.Flag = blas.Rescaling + d2 = (d2 / gam) / gam + h21 *= gam + h22 *= gam + } + + switch p.Flag { + case blas.Diagonal: + p.H = [4]float64{0: h11, 3: h22} + case blas.OffDiagonal: + p.H = [4]float64{1: h21, 2: h12} + case blas.Rescaling: + p.H = [4]float64{h11, h21, h12, h22} + default: + panic("blas: unexpected blas.Flag") + } + + return p, d1, d2, x1 +} + +// Drot applies a plane transformation. +// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] +func (Implementation) Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = c*vx+s*vy, c*vy-s*vx + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx + ix += incX + iy += incY + } +} + +// Drotm applies the modified Givens rotation to the 2×n matrix. 
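Drotm, implemented next, reads the 2×2 factor H from p.H in column-major order (H[0]=h11, H[1]=h21, H[2]=h12, H[3]=h22) and specializes the update by flag. A hedged sketch applying a full, Rescaling-flagged H to a single column:

package main

import (
    "fmt"

    "gonum.org/v1/gonum/blas"
    "gonum.org/v1/gonum/blas/gonum"
)

func main() {
    var impl gonum.Implementation
    x := []float64{1}
    y := []float64{2}
    // H = [h11 h12; h21 h22] = [1 2; 3 4], stored column-major in p.H.
    p := blas.DrotmParams{
        Flag: blas.Rescaling,
        H:    [4]float64{1, 3, 2, 4},
    }
    impl.Drotm(1, x, 1, y, 1, p)
    // x' = h11*x + h12*y = 1 + 4 = 5; y' = h21*x + h22*y = 3 + 8 = 11.
    fmt.Println(x[0], y[0]) // 5 11
}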
+func (Implementation) Drotm(n int, x []float64, incX int, y []float64, incY int, p blas.DrotmParams) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + + if p.Flag == blas.Identity { + return + } + + switch p.Flag { + case blas.Rescaling: + h11 := p.H[0] + h12 := p.H[2] + h21 := p.H[1] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 + ix += incX + iy += incY + } + case blas.OffDiagonal: + h12 := p.H[2] + h21 := p.H[1] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx+vy*h12, vx*h21+vy + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx+vy*h12, vx*h21+vy + ix += incX + iy += incY + } + case blas.Diagonal: + h11 := p.H[0] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy, -vx+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 + ix += incX + iy += incY + } + } +} + +// Dscal scales x by alpha. +// x[i] *= alpha +// Dscal has no effect if incX < 0. +func (Implementation) Dscal(n int, alpha float64, x []float64, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(badX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + f64.ScalUnitary(alpha, x[:n]) + return + } + f64.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1double_ddot.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1double_ddot.go new file mode 100644 index 00000000..95205e75 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1double_ddot.go @@ -0,0 +1,49 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
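A recurring convention in all of these Level 1 routines: a negative increment walks the vector backwards from its logical end, so traversal starts at index (-n+1)*incX. A plain Go sketch of that indexing, independent of the package:

package main

import "fmt"

func main() {
    x := []float64{10, 20, 30}
    n, incX := 3, -1
    // With incX < 0 the routines start at ix = (-n+1)*incX = 2, the last
    // slice element, and step backwards by |incX|.
    ix := (-n + 1) * incX
    for i := 0; i < n; i++ {
        fmt.Println(x[ix]) // 30, then 20, then 10
        ix += incX
    }
}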
+ +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Ddot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +func (Implementation) Ddot(n int, x []float64, incX int, y []float64, incY int) float64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(badX) + } + if len(y) < n { + panic(badY) + } + return f64.DotUnitary(x[:n], y) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(badX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(badY) + } + return f64.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single.go new file mode 100644 index 00000000..c34cd943 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single.go @@ -0,0 +1,644 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + math "gonum.org/v1/gonum/internal/math32" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level1 = Implementation{} + +// Snrm2 computes the Euclidean norm of a vector, +// sqrt(\sum_i x[i] * x[i]). +// This function returns 0 if incX is negative. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Snrm2(n int, x []float32, incX int) float32 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if incX > 0 && (n-1)*incX >= len(x) { + panic(badX) + } + if n < 2 { + if n == 1 { + return math.Abs(x[0]) + } + if n == 0 { + return 0 + } + panic(nLT0) + } + var ( + scale float32 = 0 + sumSquares float32 = 1 + ) + if incX == 1 { + x = x[:n] + for _, v := range x { + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) + } + for ix := 0; ix < n*incX; ix += incX { + val := x[ix] + if val == 0 { + continue + } + absxi := math.Abs(val) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// Sasum computes the sum of the absolute values of the elements of x. +// \sum_i |x[i]| +// Sasum returns 0 if incX is negative. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sasum(n int, x []float32, incX int) float32 { + var sum float32 + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if incX > 0 && (n-1)*incX >= len(x) { + panic(badX) + } + if incX == 1 { + x = x[:n] + for _, v := range x { + sum += math.Abs(v) + } + return sum + } + for i := 0; i < n; i++ { + sum += math.Abs(x[i*incX]) + } + return sum +} + +// Isamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Isamax returns -1 if n == 0. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Isamax(n int, x []float32, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return -1 + } + if incX > 0 && (n-1)*incX >= len(x) { + panic(badX) + } + if n < 2 { + if n == 1 { + return 0 + } + if n == 0 { + return -1 // Netlib returns invalid index when n == 0 + } + panic(nLT0) + } + idx := 0 + max := math.Abs(x[0]) + if incX == 1 { + for i, v := range x[:n] { + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + v := x[ix] + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Sswap exchanges the elements of two vectors. +// x[i], y[i] = y[i], x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sswap(n int, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} + +// Scopy copies the elements of x into the elements of y. +// y[i] = x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Scopy(n int, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Saxpy adds alpha times x to y +// y[i] += alpha * x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. 
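As the Isamax documentation above notes, ties resolve to the earliest index and n == 0 yields -1. A usage sketch under the same assumed import path:

package main

import (
    "fmt"

    "gonum.org/v1/gonum/blas/gonum"
)

func main() {
    var impl gonum.Implementation
    x := []float32{-3, 1, 3, 2}
    // |-3| ties with |3|; the earliest index (0) wins.
    fmt.Println(impl.Isamax(4, x, 1)) // 0
    fmt.Println(impl.Isamax(0, x, 1)) // -1 for an empty vector
}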
+func (Implementation) Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + f32.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + f32.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Srotg computes the plane rotation +// _ _ _ _ _ _ +// | c s | | a | | r | +// | -s c | * | b | = | 0 | +// ‾ ‾ ‾ ‾ ‾ ‾ +// where +// r = ±√(a^2 + b^2) +// c = a/r, the cosine of the plane rotation +// s = b/r, the sine of the plane rotation +// +// NOTE: There is a discrepancy between the reference implementation and the BLAS +// technical manual regarding the sign for r when a or b are zero. +// Srotg agrees with the definition in the manual and other +// common BLAS implementations. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srotg(a, b float32) (c, s, r, z float32) { + if b == 0 && a == 0 { + return 1, 0, a, 0 + } + absA := math.Abs(a) + absB := math.Abs(b) + aGTb := absA > absB + r = math.Hypot(a, b) + if aGTb { + r = math.Copysign(r, a) + } else { + r = math.Copysign(r, b) + } + c = a / r + s = b / r + if aGTb { + z = s + } else if c != 0 { // r == 0 case handled above + z = 1 / c + } else { + z = 1 + } + return +} + +// Srotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srotmg(d1, d2, x1, y1 float32) (p blas.SrotmParams, rd1, rd2, rx1 float32) { + // The implementation of Drotmg used here is taken from Hopkins 1997 + // Appendix A: https://doi.org/10.1145/289251.289253 + // with the exception of the gam constants below. + + const ( + gam = 4096.0 + gamsq = gam * gam + rgamsq = 1.0 / gamsq + ) + + if d1 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + if d2 == 0 || y1 == 0 { + p.Flag = blas.Identity + return p, d1, d2, x1 + } + + var h11, h12, h21, h22 float32 + if (d1 == 0 || x1 == 0) && d2 > 0 { + p.Flag = blas.Diagonal + h12 = 1 + h21 = -1 + x1 = y1 + d1, d2 = d2, d1 + } else { + p2 := d2 * y1 + p1 := d1 * x1 + q2 := p2 * y1 + q1 := p1 * x1 + if math.Abs(q1) > math.Abs(q2) { + p.Flag = blas.OffDiagonal + h11 = 1 + h22 = 1 + h21 = -y1 / x1 + h12 = p2 / p1 + u := 1 - h12*h21 + if u <= 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + d1 /= u + d2 /= u + x1 *= u + } else { + if q2 < 0 { + p.Flag = blas.Rescaling // Error state.
+ return p, 0, 0, 0 + } + + p.Flag = blas.Diagonal + h21 = -1 + h12 = 1 + h11 = p1 / p2 + h22 = x1 / y1 + u := 1 + h11*h22 + d1, d2 = d2/u, d1/u + x1 = y1 * u + } + } + + for d1 <= rgamsq && d1 != 0 { + p.Flag = blas.Rescaling + d1 = (d1 * gam) * gam + x1 /= gam + h11 /= gam + h12 /= gam + } + for d1 > gamsq { + p.Flag = blas.Rescaling + d1 = (d1 / gam) / gam + x1 *= gam + h11 *= gam + h12 *= gam + } + + for math.Abs(d2) <= rgamsq && d2 != 0 { + p.Flag = blas.Rescaling + d2 = (d2 * gam) * gam + h21 /= gam + h22 /= gam + } + for math.Abs(d2) > gamsq { + p.Flag = blas.Rescaling + d2 = (d2 / gam) / gam + h21 *= gam + h22 *= gam + } + + switch p.Flag { + case blas.Diagonal: + p.H = [4]float32{0: h11, 3: h22} + case blas.OffDiagonal: + p.H = [4]float32{1: h21, 2: h12} + case blas.Rescaling: + p.H = [4]float32{h11, h21, h12, h22} + default: + panic("blas: unexpected blas.Flag") + } + + return p, d1, d2, x1 +} + +// Srot applies a plane transformation. +// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srot(n int, x []float32, incX int, y []float32, incY int, c float32, s float32) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = c*vx+s*vy, c*vy-s*vx + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx + ix += incX + iy += incY + } +} + +// Srotm applies the modified Givens rotation to the 2×n matrix. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Srotm(n int, x []float32, incX int, y []float32, incY int, p blas.SrotmParams) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + + if p.Flag == blas.Identity { + return + } + + switch p.Flag { + case blas.Rescaling: + h11 := p.H[0] + h12 := p.H[2] + h21 := p.H[1] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 + ix += incX + iy += incY + } + case blas.OffDiagonal: + h12 := p.H[2] + h21 := p.H[1] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx+vy*h12, vx*h21+vy + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx+vy*h12, vx*h21+vy + ix += incX + iy += incY + } + case blas.Diagonal: + h11 := p.H[0] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy, -vx+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 + ix += incX + iy += incY + } + } +} + +// Sscal scales x by alpha. +// x[i] *= alpha +// Sscal has no effect if incX < 0. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sscal(n int, alpha float32, x []float32, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(badX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + f32.ScalUnitary(alpha, x[:n]) + return + } + f32.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_dsdot.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_dsdot.go new file mode 100644 index 00000000..3c9ef122 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_dsdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Dsdot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. 
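Dsdot, defined next, exists because accumulating float32 products in float64 preserves low-order bits that a float32 accumulator must round away. A sketch showing the difference, under the same assumed import path:

package main

import (
    "fmt"

    "gonum.org/v1/gonum/blas/gonum"
)

func main() {
    var impl gonum.Implementation
    x := []float32{1e4, 1}
    // x·x = 1e8 + 1, which needs 27 mantissa bits; float32 has only 24.
    fmt.Println(impl.Sdot(2, x, 1, x, 1))  // 1e+08 (the +1 is rounded away)
    fmt.Println(impl.Dsdot(2, x, 1, x, 1)) // 1.00000001e+08 (kept in float64)
}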
+func (Implementation) Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(badX) + } + if len(y) < n { + panic(badY) + } + return f32.DdotUnitary(x[:n], y) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(badX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(badY) + } + return f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_sdot.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_sdot.go new file mode 100644 index 00000000..72fe6f81 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_sdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sdot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sdot(n int, x []float32, incX int, y []float32, incY int) float32 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(badX) + } + if len(y) < n { + panic(badY) + } + return f32.DotUnitary(x[:n], y) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(badX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(badY) + } + return f32.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_sdsdot.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_sdsdot.go new file mode 100644 index 00000000..81142c48 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level1single_sdsdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sdsdot computes the dot product of the two vectors plus a constant +// alpha + \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. 
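Sdsdot, defined next, is alpha plus the float64-accumulated dot product; with a non-unit increment only every incX-th element of x participates. A small sketch:

package main

import (
    "fmt"

    "gonum.org/v1/gonum/blas/gonum"
)

func main() {
    var impl gonum.Implementation
    x := []float32{1, 2, 3}
    y := []float32{4, 5}
    // With n = 2 and incX = 2 only x[0] and x[2] are used:
    // 10 + (1*4 + 3*5) = 29.
    fmt.Println(impl.Sdsdot(2, 10, x, 2, y, 1)) // 29
}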
+func (Implementation) Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(badX) + } + if len(y) < n { + panic(badY) + } + return alpha + float32(f32.DdotUnitary(x[:n], y)) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(badX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(badY) + } + return alpha + float32(f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy))) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go new file mode 100644 index 00000000..6af2a5ba --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go @@ -0,0 +1,2489 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math/cmplx" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c128" +) + +// Zgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix +// with kL sub-diagonals and kU super-diagonals. +func (Implementation) Zgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex128, ab []complex128, ldab int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + checkZBandMatrix('A', m, n, kL, kU, ab, ldab) + var lenX, lenY int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + lenX = n + lenY = m + case blas.Trans, blas.ConjTrans: + lenX = m + lenY = n + } + checkZVector('x', lenX, x, incX) + checkZVector('y', lenY, y, incY) + + if m == 0 || n == 0 || (alpha == 0 && beta == 1) { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c128.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + nRow := min(m, n+kL) + nCol := kL + 1 + kU + switch trans { + case blas.NoTrans: + iy := ky + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := ab[i*ldab+l : i*ldab+u] + off := max(0, i-kL) + xtmp := x[off : off+u-l] + var sum complex128 + for j, v := range aRow { + sum += xtmp[j] * v + } + y[iy] += alpha * sum + iy += incY + } + } else { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := ab[i*ldab+l : i*ldab+u] + off := max(0, i-kL) * incX + jx := kx + var sum complex128 + for _, v := range aRow { + sum += x[off+jx] * v + jx += incX + } + y[iy] += alpha * sum + iy += incY + } + } + case blas.Trans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := ab[i*ldab+l : i*ldab+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := ab[i*ldab+l : i*ldab+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + ix += incX + } + } + case blas.ConjTrans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := ab[i*ldab+l : i*ldab+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := ab[i*ldab+l : i*ldab+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + ix += incX + } + } + } +} + +// Zgemv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. +func (Implementation) Zgemv(trans blas.Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + checkZMatrix('A', m, n, a, lda) + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + checkZVector('x', n, x, incX) + checkZVector('y', m, y, incY) + case blas.Trans, blas.ConjTrans: + checkZVector('x', m, x, incX) + checkZVector('y', n, y, incY) + } + + if m == 0 || n == 0 || (alpha == 0 && beta == 1) { + return + } + + var lenX, lenY int + if trans == blas.NoTrans { + lenX = n + lenY = m + } else { + lenX = m + lenY = n + } + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c128.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + switch trans { + default: + // Form y = alpha*A*x + y. + iy := ky + if incX == 1 { + for i := 0; i < m; i++ { + y[iy] += alpha * c128.DotuUnitary(a[i*lda:i*lda+n], x[:n]) + iy += incY + } + return + } + for i := 0; i < m; i++ { + y[iy] += alpha * c128.DotuInc(a[i*lda:i*lda+n], x, uintptr(n), 1, uintptr(incX), 0, uintptr(kx)) + iy += incY + } + return + + case blas.Trans: + // Form y = alpha*A^T*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + c128.AxpyUnitary(alpha*x[ix], a[i*lda:i*lda+n], y[:n]) + ix += incX + } + return + } + for i := 0; i < m; i++ { + c128.AxpyInc(alpha*x[ix], a[i*lda:i*lda+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + ix += incX + } + return + + case blas.ConjTrans: + // Form y = alpha*A^H*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + for j := 0; j < n; j++ { + y[j] += tmp * cmplx.Conj(a[i*lda+j]) + } + ix += incX + } + return + } + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + jy := ky + for j := 0; j < n; j++ { + y[jy] += tmp * cmplx.Conj(a[i*lda+j]) + jy += incY + } + ix += incX + } + return + } +} + +// Zgerc performs the rank-one operation +// A += alpha * x * y^H +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +func (Implementation) Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + checkZMatrix('A', m, n, a, lda) + checkZVector('x', m, x, incX) + checkZVector('y', n, y, incY) + + if m == 0 || n == 0 || alpha == 0 { + return + } + + var kx, jy int + if incX < 0 { + kx = (1 - m) * incX + } + if incY < 0 { + jy = (1 - n) * incY + } + for j := 0; j < n; j++ { + if y[jy] != 0 { + tmp := alpha * cmplx.Conj(y[jy]) + c128.AxpyInc(tmp, x, a[j:], uintptr(m), uintptr(incX), uintptr(lda), uintptr(kx), 0) + } + jy += incY + } +} + +// Zgeru performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +func (Implementation) Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + checkZMatrix('A', m, n, a, lda) + checkZVector('x', m, x, incX) + checkZVector('y', n, y, incY) + + if m == 0 || n == 0 || alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - m) * incX + } + if incY == 1 { + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c128.AxpyUnitary(tmp, y[:n], a[i*lda:i*lda+n]) + } + kx += incX + } + return + } + var jy int + if incY < 0 { + jy = (1 - n) * incY + } + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c128.AxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(jy), 0) + } + kx += incX + } +} + +// Zhbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian band matrix with k super-diagonals. The imaginary parts of +// the diagonal elements of A are ignored and assumed to be zero. 
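These band routines use the packed row-major layout enforced by checkZBandMatrix and checkZhbMatrix: for the general band matrix of Zgbmv, A(i,j) lives at ab[i*ldab + (j-i+kL)]; the Hermitian band of Zhbmv below is the same idea with only one triangle stored. A standalone sketch of the general packing (the set helper is illustrative):

package main

import "fmt"

func main() {
    // An m×n band matrix with kL sub- and kU super-diagonals stores
    // element A(i, j) at ab[i*ldab + (j-i+kL)], one left-aligned packed
    // band row per matrix row.
    m, kL, kU := 4, 1, 1
    ldab := kL + kU + 1
    ab := make([]complex128, m*ldab)
    set := func(i, j int, v complex128) { ab[i*ldab+j-i+kL] = v }
    set(1, 0, 2) // sub-diagonal entry A(1,0)
    set(1, 1, 5) // diagonal entry A(1,1)
    set(1, 2, 7) // super-diagonal entry A(1,2)
    fmt.Println(ab[ldab : 2*ldab]) // packed row 1: [(2+0i) (5+0i) (7+0i)]
}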
+func (Implementation) Zhbmv(uplo blas.Uplo, n, k int, alpha complex128, ab []complex128, ldab int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + checkZhbMatrix('A', n, k, ab, ldab) + checkZVector('x', n, x, incX) + checkZVector('y', n, y, incY) + + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through ab. + switch uplo { + case blas.Upper: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + aRow := ab[i*ldab:] + alphaxi := alpha * x[i] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[i+j] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += sum + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + aRow := ab[i*ldab:] + alphaxi := alpha * x[ix] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jx := incX + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + } + case blas.Lower: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[i] + jy := l * incY + aRow := ab[i*ldab:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[ix] + jx := l * incX + jy := l * incY + aRow := ab[i*ldab:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + ix += incX + iy += incY + } + } + } +} + +// Zhemv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix. The imaginary parts of the diagonal elements of A are +// ignored and assumed to be zero. +func (Implementation) Zhemv(uplo blas.Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + checkZMatrix('A', n, n, a, lda) + checkZVector('x', n, x, incX) + checkZVector('y', n, y, incY) + + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through + // the triangular part of A. + + if uplo == blas.Upper { + // Form y when A is stored in upper triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := ix + jy := iy + for j := i + 1; j < n; j++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } + return + } + + // Form y when A is stored in lower triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := kx + jy := ky + for j := 0; j < i; j++ { + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } +} + +// Zher performs the Hermitian rank-one operation +// A += alpha * x * x^H +// where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n +// element vector. On entry, the imaginary parts of the diagonal elements of A +// are ignored and assumed to be zero, on return they will be set to zero. 
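The Zher implementation below updates only the triangle selected by uplo and forces the diagonal to stay real. A hedged usage sketch, same assumed import path:

package main

import (
    "fmt"

    "gonum.org/v1/gonum/blas"
    "gonum.org/v1/gonum/blas/gonum"
)

func main() {
    var impl gonum.Implementation
    // 2×2 Hermitian A with the upper triangle stored; start from zero.
    a := make([]complex128, 4)
    x := []complex128{1 + 1i, 2}
    // A += 2 * x * x^H touches only the stored (upper) triangle and
    // leaves the unreferenced lower element a[2] untouched.
    impl.Zher(blas.Upper, 2, 2, x, 1, a, 2)
    fmt.Println(a) // [(4+0i) (4+4i) (0+0i) (8+0i)]
}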
+func (Implementation) Zher(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + checkZMatrix('A', n, n, a, lda) + checkZVector('x', n, x, incX) + + if n == 0 || alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + jx := ix + incX + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } + return + } + + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + jx := kx + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } +} + +// Zher2 performs the Hermitian rank-two operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a scalar, x and y are n element vectors and A is an n×n +// Hermitian matrix. On entry, the imaginary parts of the diagonal elements are +// ignored and assumed to be zero. On return they will be set to zero. 
+func (Implementation) Zher2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + checkZMatrix('A', n, n, a, lda) + checkZVector('x', n, x, incX) + checkZVector('y', n, y, incY) + + if n == 0 || alpha == 0 { + return + } + + var kx, ky int + var ix, iy int + if incX != 1 || incY != 1 { + if incX < 0 { + kx = (1 - n) * incX + } + if incY < 0 { + ky = (1 - n) * incY + } + ix = kx + iy = ky + } + if uplo == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } + return + } + + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } +} + +// Zhpmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix in packed form. The imaginary parts of the diagonal +// elements of A are ignored and assumed to be zero. +func (Implementation) Zhpmv(uplo blas.Uplo, n int, alpha complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + checkZVector('x', n, x, incX) + checkZVector('y', n, y, incY) + if len(ap) < n*(n+1)/2 { + panic("blas: insufficient A packed matrix slice length") + } + + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] *= beta + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form y when ap contains the upper triangle. + // Here, kk points to the current diagonal element in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + y[i] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex128 + k := kk + 1 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + y[i] += alpha * tmp2 + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + y[iy] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex128 + jx := ix + jy := iy + for k := kk + 1; k < kk+n-i; k++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + } + y[iy] += alpha * tmp2 + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form y when ap contains the lower triangle. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + k := kk + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + aii := complex(real(ap[kk+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(ap[kk+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Zhpr performs the Hermitian rank-1 operation +// A += alpha * x * x^H +// where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix +// in packed form. On entry, the imaginary parts of the diagonal elements are +// assumed to be zero, and on return they are set to zero. +func (Implementation) Zhpr(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, ap []complex128) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + checkZVector('x', n, x, incX) + if len(ap) < n*(n+1)/2 { + panic("blas: insufficient A packed matrix slice length") + } + + if n == 0 || alpha == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. 
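+ // For row i of the upper triangle, ap[kk] holds A[i,i] and ap[kk+1:kk+n-i] + // holds A[i,i+1:n], so kk advances by n-i from one row to the next.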
+ if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + a := ap[kk+1 : kk+n-i] + x := x[i+1 : n] + for j, v := range x { + a[j] += tmp * cmplx.Conj(v) + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + jx := ix + incX + a := ap[kk+1 : kk+n-i] + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + for j, v := range x[:i] { + a[j] += tmp * cmplx.Conj(v) + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + jx := kx + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + kk += i + 1 + } + } +} + +// Zhpr2 performs the Hermitian rank-2 operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a complex scalar, x and y are n element vectors, and A is an +// n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts +// of the diagonal elements are assumed to be zero, and on return they are set to zero. +func (Implementation) Zhpr2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + checkZVector('x', n, x, incX) + checkZVector('y', n, y, incY) + if len(ap) < n*(n+1)/2 { + panic("blas: insufficient A packed matrix slice length") + } + + if n == 0 || alpha == 0 { + return + } + + // Set up start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. 
+ if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk] = complex(aii, 0) + k := kk + 1 + for j := i + 1; j < n; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for k := kk + 1; k < kk+n-i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + k := kk + for j := 0; j < i; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Ztbmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular band matrix, with +// (k+1) diagonals. +func (Implementation) Ztbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, ab []complex128, ldab int, x []complex128, incX int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans { + panic(badTranspose) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + checkZtbMatrix('A', n, k, ab, ldab) + checkZVector('x', n, x, incX) + + if n == 0 { + return + } + + // Set up start index in X. 
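+ // For a negative increment the vector is traversed from its far end, so the + // first used element is at offset (1-n)*incX. For example, n = 3 and + // incX = -2 give kx = 4, touching x[4], x[2] and x[0] in that order.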
+ var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if diag == blas.NonUnit { + xi *= ab[i*ldab] + } + kk := min(k, n-i-1) + for j, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + xi += x[i+j+1] * aij + } + x[i] = xi + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if diag == blas.NonUnit { + xi *= ab[i*ldab] + } + kk := min(k, n-i-1) + jx := ix + incX + for _, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + xi *= ab[i*ldab+k] + } + kk := min(k, i) + for j, aij := range ab[i*ldab+k-kk : i*ldab+k] { + xi += x[i-kk+j] * aij + } + x[i] = xi + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + xi *= ab[i*ldab+k] + } + kk := min(k, i) + jx := ix - kk*incX + for _, aij := range ab[i*ldab+k-kk : i*ldab+k] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix -= incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[i+j+1] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= ab[i*ldab] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= ab[i*ldab] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[i-kk+j] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= ab[i*ldab+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= ab[i*ldab+k] + } + ix += incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[i+j+1] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ab[i*ldab]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ab[i*ldab]) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[i-kk+j] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ab[i*ldab+k]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ab[i*ldab+k]) + } + ix += incX + } + } + } + } +} + +// Ztbsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n 
element vectors and A is an n×n triangular band matrix +// with (k+1) diagonals. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, ab []complex128, ldab int, x []complex128, incX int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans { + panic(badTranspose) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + checkZtbMatrix('A', n, k, ab, ldab) + checkZVector('x', n, x, incX) + + if n == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex128 + for j, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + sum += x[i+1+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= ab[i*ldab] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex128 + jx := ix + incX + for _, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= ab[i*ldab] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex128 + for j, aij := range ab[i*ldab+k-kk : i*ldab+k] { + sum += x[i-kk+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= ab[i*ldab+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex128 + jx := ix - kk*incX + for _, aij := range ab[i*ldab+k-kk : i*ldab+k] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= ab[i*ldab+k] + } + ix += incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= ab[i*ldab] + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[i+1+j] -= xi * aij + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= ab[i*ldab] + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[jx] -= xi * aij + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= ab[i*ldab+k] + } + kk := min(k, i) + xi := x[i] + for j, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[i-kk+j] -= xi * aij + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= ab[i*ldab+k] + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[jx] -= xi * aij + jx += incX + } + ix -= incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(ab[i*ldab]) + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[i+1+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(ab[i*ldab]) + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + 
for _, aij := range ab[i*ldab+1 : i*ldab+kk+1] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(ab[i*ldab+k]) + } + kk := min(k, i) + xi := x[i] + for j, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[i-kk+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(ab[i*ldab+k]) + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range ab[i*ldab+k-kk : i*ldab+k] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix -= incX + } + } + } + } +} + +// Ztpmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular matrix, supplied in +// packed form. +func (Implementation) Ztpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans { + panic(badTranspose) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + checkZVector('x', n, x, incX) + if len(ap) < n*(n+1)/2 { + panic("blas: insufficient A packed matrix slice length") + } + + if n == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + x[i] += c128.DotuUnitary(ap[kk+1:kk+n-i], x[i+1:]) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + x[ix] += c128.DotuInc(ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + kk += n - i + } + } + } else { + // kk points to the beginning of current row in ap. + kk := n*(n+1)/2 - n + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + if i > 0 { + x[i] += c128.DotuUnitary(ap[kk:kk+i], x[:i]) + } + kk -= i + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + if i > 0 { + x[ix] += c128.DotuInc(ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + kk -= i + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + c128.AxpyUnitary(xi, ap[kk+1:kk+n-i], x[i+1:n]) + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + c128.AxpyInc(xi, ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. 
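+ // In the lower packed layout row i occupies ap[i*(i+1)/2 : i*(i+1)/2+i+1], so + // starting from kk = 0 and adding i+1 after row i keeps kk at the start of + // the current row.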
+ kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if i > 0 { + c128.AxpyUnitary(x[i], ap[kk:kk+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyInc(x[ix], ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + // Form x = A^H*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk]) + } + k := kk + 1 + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(ap[k]) + k++ + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk]) + } + jx := ix + incX + k := kk + 1 + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(ap[k]) + jx += incX + k++ + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i, xi := range x { + for j := 0; j < i; j++ { + x[j] += xi * cmplx.Conj(ap[kk+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk+i]) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + jx := kx + for j := 0; j < i; j++ { + x[jx] += xi * cmplx.Conj(ap[kk+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk+i]) + } + ix += incX + kk += i + 1 + } + } + } +} + +// Ztpsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix in +// packed form. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans { + panic(badTranspose) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + if len(ap) < n*(n+1)/2 { + panic("blas: insufficient A packed matrix slice length") + } + checkZVector('x', n, x, incX) + + if n == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. 
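+ // The solve is a substitution sweep: an upper triangular A is processed from + // the last row upwards (back substitution) and a lower triangular A from the + // first row downwards (forward substitution), subtracting the already-solved + // components before dividing by the diagonal when diag == blas.NonUnit.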
+ if uplo == blas.Upper { + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[i] -= c128.DotuUnitary(x[i+1:n], ap[kk+1:kk+n-i]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[ix] -= c128.DotuInc(x, ap[kk+1:kk+n-i], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + kk := 0 + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c128.DotuUnitary(x[:i], ap[kk:kk+i+1]) + } + if diag == blas.NonUnit { + x[i] /= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c128.DotuInc(x, ap[kk:kk+i+1], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. + if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= ap[kk] + } + if n-j-1 > 0 { + c128.AxpyUnitary(-x[j], ap[kk+1:kk+n-j], x[j+1:n]) + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= ap[kk] + } + if n-j-1 > 0 { + c128.AxpyInc(-x[jx], ap[kk+1:kk+n-j], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= ap[kk+j] + } + if j > 0 { + c128.AxpyUnitary(-x[j], ap[kk:kk+j], x[:j]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= ap[kk+j] + } + if j > 0 { + c128.AxpyInc(-x[jx], ap[kk:kk+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + kk -= j + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk]) + } + xj := x[j] + k := kk + 1 + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(ap[k]) + k++ + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk]) + } + xj := x[jx] + ix := jx + incX + k := kk + 1 + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(ap[k]) + ix += incX + k++ + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(ap[kk+i]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(ap[kk+i]) + ix += incX + } + jx -= incX + kk -= j + } + } + } +} + +// Ztrmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is a vector, and A is an n×n triangular matrix. 
+func (Implementation) Ztrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans { + panic(badTranspose) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + checkZMatrix('A', n, n, a, lda) + checkZVector('x', n, x, incX) + + if n == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[i] += c128.DotuUnitary(a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[ix] += c128.DotuInc(a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if i > 0 { + x[i] += c128.DotuUnitary(a[i*lda:i*lda+i], x[:i]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if i > 0 { + x[ix] += c128.DotuInc(a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + c128.AxpyUnitary(xi, a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + c128.AxpyInc(xi, a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyUnitary(x[i], a[i*lda:i*lda+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyInc(x[ix], a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + // Form x = A^H*x. 
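+ // A^H is never formed explicitly; the loops below walk the rows of A and + // conjugate each element on the fly with cmplx.Conj.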
+ if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(a[i*lda+j]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + jx := ix + incX + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(a[i*lda+j]) + jx += incX + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + x[j] += x[i] * cmplx.Conj(a[i*lda+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + jx := kx + for j := 0; j < i; j++ { + x[jx] += x[ix] * cmplx.Conj(a[i*lda+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + ix += incX + } + } + } +} + +// Ztrsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztrsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans { + panic(badTranspose) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + checkZMatrix('A', n, n, a, lda) + checkZVector('x', n, x, incX) + + if n == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[i] -= c128.DotuUnitary(x[i+1:n], a[i*lda+i+1:i*lda+n]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[ix] -= c128.DotuInc(x, a[i*lda+i+1:i*lda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c128.DotuUnitary(x[:i], a[i*lda:i*lda+i]) + } + if diag == blas.NonUnit { + x[i] /= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c128.DotuInc(x, a[i*lda:i*lda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. 
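+ // The transposed solve proceeds by columns of A^T, which are rows of A: x[j] + // is finalized first and its contribution is then eliminated from the + // not-yet-solved entries with an axpy, so A is still read one row at a time.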
+ if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + if n-j-1 > 0 { + c128.AxpyUnitary(-x[j], a[j*lda+j+1:j*lda+n], x[j+1:n]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if n-j-1 > 0 { + c128.AxpyInc(-x[jx], a[j*lda+j+1:j*lda+n], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + xj := x[j] + if j > 0 { + c128.AxpyUnitary(-xj, a[j*lda:j*lda+j], x[:j]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if j > 0 { + c128.AxpyInc(-x[jx], a[j*lda:j*lda+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := jx + incX + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx -= incX + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2double.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2double.go new file mode 100644 index 00000000..ce5f9ed1 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2double.go @@ -0,0 +1,2070 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level2 = Implementation{} + +// Dger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
+func (Implementation) Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { + // Check inputs + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (m-1)*incX >= len(x)) || (incX < 0 && (1-m)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(m-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + + // Quick return if possible + if m == 0 || n == 0 || alpha == 0 { + return + } + f64.Ger(uintptr(m), uintptr(n), + alpha, + x, uintptr(incX), + y, uintptr(incY), + a, uintptr(lda)) +} + +// Dgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans +// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, +// x and y are vectors, and alpha and beta are scalars. +func (Implementation) Dgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(badY) + } + if lda*(min(m, n+kL)-1)+kL+kU+1 > len(a) || lda < kL+kU+1 { + panic(badLdA) + } + + // Quick return if possible + if m == 0 || n == 0 || (alpha == 0 && beta == 1) { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // First form y = beta * y + if incY > 0 { + Implementation{}.Dscal(lenY, beta, y, incY) + } else { + Implementation{}.Dscal(lenY, beta, y, -incY) + } + + if alpha == 0 { + return + } + + // i and j are indices of the compacted banded matrix.
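+ // Row i of the band storage holds dense row i restricted to columns + // max(0, i-kL) through min(n-1, i+kU), with the diagonal element stored at + // a[i*lda+kL].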
+ // off is the offset into the dense matrix (off + j = densej) + nCol := kU + 1 + kL + if tA == blas.NoTrans { + iy := ky + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + xtmp := x[off : off+u-l] + var sum float64 + for j, v := range atmp { + sum += xtmp[j] * v + } + y[iy] += sum * alpha + iy += incY + } + return + } + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + jx := kx + var sum float64 + for _, v := range atmp { + sum += x[off*incX+jx] * v + jx += incX + } + y[iy] += sum * alpha + iy += incY + } + return + } + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[i] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + } + return + } + ix := kx + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[ix] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + ix += incX + } +} + +// Dtrmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x is a vector. +func (Implementation) Dtrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < n { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if n == 0 { + return + } + nonUnit := d != blas.Unit + if n == 1 { + if nonUnit { + x[0] *= a[0] + } + return + } + var kx int + if incX <= 0 { + kx = -(n - 1) * incX + } + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[i] + } else { + tmp = x[i] + } + xtmp := x[i+1:] + x[i] = tmp + f64.DotUnitary(a[ilda+i+1:ilda+n], xtmp) + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f64.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp += a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f64.DotUnitary(a[ilda:ilda+i], x) + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f64.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + ix -= incX + } + return + } + // Cases where a is transposed. 
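+ // x = A^T*x is formed with axpy updates instead of dot products: row i of A + // scales x[i] into the other entries. The traversal order (downwards for + // Upper, upwards for Lower) guarantees that x[i] still holds its original + // value when it is read.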
+ if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[i] + f64.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) + if nonUnit { + x[i] *= a[ilda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[ix] + f64.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[i] + f64.AxpyUnitary(xi, a[ilda:ilda+i], x) + if nonUnit { + x[i] *= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[ix] + f64.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix += incX + } +} + +// Dtrsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Dtrsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { + // Test the input parameters + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + // Quick return if possible + if n == 0 { + return + } + if n == 1 { + if d == blas.NonUnit { + x[0] /= a[0] + } + return + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + var sum float64 + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := i + j + 1 + sum += x[jv] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + var sum float64 + jx := ix + incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + var sum float64 + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + sum += x[j] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + var sum float64 + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + return + } + // Cases where a is transposed. 
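+ // The transposed solve mirrors the transposed product in Dtrmv: x[i] is + // divided by the diagonal first and the result is then subtracted from the + // entries that remain to be solved, again reading only row i of a at step i.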
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := j + i + 1 + x[jv] -= v * xi + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + (i+1)*incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + x[j] -= v * xi + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix -= incX + } +} + +// Dsymv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +func (Implementation) Dsymv(ul blas.Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + // Check inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda > 1 && lda < n { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + // Quick return if possible + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY > 0 { + Implementation{}.Dscal(n, beta, y, incY) + } else { + Implementation{}.Dscal(n, beta, y, -incY) + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := x[i] * a[i*lda+i] + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jp := j + i + 1 + sum += x[jp] * v + y[jy] += xv * v + jy += incY + } + y[iy] += alpha * sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := x[ix] * a[i*lda+i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + } + return + } + // Cases where a is lower triangular. 
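+ // Only the stored triangle is read: each element a[i*lda+j] with j < i + // contributes to y[i] through sum and to y[j] through xv, supplying the + // implicit a[j*lda+i] term of the symmetric product.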
+ if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + jy := ky + xv := alpha * x[i] + atmp := a[i*lda : i*lda+i] + var sum float64 + for j, v := range atmp { + sum += x[j] * v + y[jy] += xv * v + jy += incY + } + sum += x[i] * a[i*lda+i] + sum *= alpha + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xv := alpha * x[ix] + atmp := a[i*lda : i*lda+i] + var sum float64 + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + sum += x[ix] * a[i*lda+i] + sum *= alpha + y[iy] += sum + ix += incX + iy += incY + } +} + +// Dtbmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. +func (Implementation) Dtbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda*(n-1)+k+1 > len(a) || lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonunit := d != blas.Unit + + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float64 + atmp := a[i*lda:] + xtmp := x[i:] + for j := 1; j < u; j++ { + sum += xtmp[j] * atmp[j] + } + if nonunit { + sum += xtmp[0] * atmp[0] + } else { + sum += xtmp[0] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float64 + atmp := a[i*lda:] + jx := incX + for j := 1; j < u; j++ { + sum += x[ix+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[0] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float64 + for j := l; j < k; j++ { + sum += x[i-k+j] * atmp[j] + } + if nonunit { + sum += x[i] * atmp[k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float64 + jx := l * incX + for j := l; j < k; j++ { + sum += x[ix-k*incX+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[k] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float64 + for j := 1; j < u; j++ { + sum += x[i-j] * a[(i-j)*lda+j] + } + if nonunit { + sum += x[i] * a[i*lda] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float64 + jx := incX + for j := 1; j < u; j++ { + sum += x[ix-jx] * a[(i-j)*lda+j] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var sum float64 + for j := 0; j < u; j++ 
{ + sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] + } + if nonunit { + sum += x[i] * a[i*lda+k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var ( + sum float64 + jx int + ) + for j := 0; j < u; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda+k] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } +} + +// Dtpmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x is a vector. +func (Implementation) Dtpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if len(ap) < (n*(n+1))/2 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xi += v * xtmp[j] + } + x[i] = xi + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset += n - i + ix += incX + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset-i : offset] + for j, v := range atmp { + xi += v * x[j] + } + x[i] = xi + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset -= i + 1 + ix -= incX + } + return + } + // Cases where ap is transposed. 
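+ // offset continues to track the index of the diagonal element (i,i) in ap; + // the per-iteration adjustments (offset -= n-i+1 for Upper, offset += i+2 for + // Lower) move it between the diagonals of consecutive packed rows.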
+ if ul == blas.Upper { + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + jx := kx + (i+1)*incX + atmp := ap[offset+1 : offset+n-i] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + offset -= n - i + 1 + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + jx := kx + atmp := ap[offset-i : offset] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + ix += incX + offset += i + 2 + } +} + +// Dtbsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, +// and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Dtbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda*(n-1)+k+1 > len(a) || lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + // Form x = A^-1 x. + // Several cases below use subslices for speed improvement. + // The incX != 1 cases usually do not because incX may be negative. 
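+ // Only in-band entries take part in each step: row i touches at most + // min(k, n-i-1) neighbors above the diagonal or min(k, i) below it, so a + // full solve costs O(n*k) operations rather than O(n^2).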
+ if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + atmp := a[i*lda+1:] + xtmp := x[i+1 : i+bands+1] + var sum float64 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + max := k + 1 + if i+max > n { + max = n - i + } + atmp := a[i*lda:] + var ( + jx int + sum float64 + ) + for j := 1; j < max; j++ { + jx += incX + sum += x[ix+jx] * atmp[j] + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[0] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + xtmp := x[i-bands : i] + var sum float64 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= atmp[bands] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * atmp[j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[bands] + } + ix += incX + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var sum float64 + for j := 0; j < bands; j++ { + sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda] + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var sum float64 + xtmp := x[i+1 : i+1+bands] + for j, v := range xtmp { + sum += v * a[(i+j+1)*lda+k-j-1] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+k] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+k] + } + ix -= incX + } +} + +// Dsbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric band matrix with k super-diagonals, x and y are +// vectors, and alpha and beta are scalars. 
+func (Implementation) Dsbmv(ul blas.Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(n-1)+k+1 > len(a) || lda < k+1 { + panic(badLdA) + } + + // Quick return if possible + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up indexes + lenX := n + lenY := n + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // First form y = beta * y + if incY > 0 { + Implementation{}.Dscal(lenY, beta, y, incY) + } else { + Implementation{}.Dscal(lenY, beta, y, -incY) + } + + if alpha == 0 { + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[i] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[i+j] * v + y[iy+jy] += tmp * v + jy += incY + } + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[ix] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jx := incX + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + return + } + + // Cases where a has bands below the diagonal. + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[i] + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += tmp * v + jy += incY + } + y[iy] += tmp * atmp[k] + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[ix] + jx := l * incX + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += tmp * atmp[k] + ix += incX + iy += incY + } +} + +// Dsyr performs the symmetric rank-one update +// A += alpha * x * x^T +// where A is an n×n symmetric matrix, and x is a vector. +func (Implementation) Dsyr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if alpha == 0 || n == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda+i : i*lda+n] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += v * tmp + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + jx := ix + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += x[jx] * tmp + jx += incX + } + } + ix += incX + } + return + } + // Cases where a is lower triangular. 
+ if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda:] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += tmp * v + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + atmp := a[i*lda:] + jx := kx + for j := 0; j < i+1; j++ { + atmp[j] += tmp * x[jx] + jx += incX + } + } + ix += incX + } +} + +// Dsyr2 performs the symmetric rank-two update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. +func (Implementation) Dsyr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } +} + +// Dtpsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
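[Editor's note] A minimal sketch of a Dtpsv call — an illustration added for review, not part of the vendored diff; it assumes the import path gonum.org/v1/gonum/blas/gonum. For blas.Upper, ap packs the upper triangle row-major, so the diagonal entry (i,i) sits at offset i*n - i*(i-1)/2:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Upper triangular A = [[1,1,1],[0,1,1],[0,0,1]] packed row-major as
	// {A00,A01,A02, A11,A12, A22}; x holds b = [6,3,1] on entry and the
	// solution of A*x = b on return.
	ap := []float64{1, 1, 1, 1, 1, 1}
	x := []float64{6, 3, 1}
	var impl gonum.Implementation
	impl.Dtpsv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, ap, x, 1)
	fmt.Println(x) // [3 2 1]
}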
+func (Implementation) Dtpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if len(ap) < (n*(n+1))/2 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + offset = n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + var sum float64 + for j, v := range atmp { + sum += v * xtmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix -= incX + offset -= n - i + 1 + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i : offset] + var sum float64 + for j, v := range atmp { + sum += v * x[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i : offset] + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix += incX + offset += i + 2 + } + return + } + // Cases where ap is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] -= v * xi + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] -= v * xi + } + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix -= incX + offset -= i + 1 + } +} + +// Dspmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. 
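[Editor's note] The same packed layout feeds Dspmv, defined below; here only the upper triangle is stored and the routine applies the full symmetric matrix. An illustration added for review (not part of the vendored diff), assuming the gonum.org/v1/gonum/blas/gonum import path:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Symmetric A = [[4,1,2],[1,3,0],[2,0,5]] stored as the packed upper
	// triangle {4,1,2, 3,0, 5}; computes y = 1*A*x + 0*y.
	ap := []float64{4, 1, 2, 3, 0, 5}
	x := []float64{1, 2, 3}
	y := make([]float64, 3)
	var impl gonum.Implementation
	impl.Dspmv(blas.Upper, 3, 1, ap, x, 1, 0, y, 1)
	fmt.Println(y) // [12 7 17]
}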
+func (Implementation) Dspmv(ul blas.Uplo, n int, alpha float64, a []float64, x []float64, incX int, beta float64, y []float64, incY int) { + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if len(a) < (n*(n+1))/2 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + // Quick return if possible + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY > 0 { + Implementation{}.Dscal(n, beta, y, incY) + } else { + Implementation{}.Dscal(n, beta, y, -incY) + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := a[offset] * x[i] + atmp := a[offset+1 : offset+n-i] + xtmp := x[i+1:] + jy := ky + (i+1)*incY + for j, v := range atmp { + sum += v * xtmp[j] + y[jy] += v * xv + jy += incY + } + y[iy] += alpha * sum + iy += incY + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := a[offset] * x[ix] + atmp := a[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + atmp := a[offset-i : offset] + jy := ky + var sum float64 + for j, v := range atmp { + sum += v * x[j] + y[jy] += v * xv + jy += incY + } + sum += a[offset] * x[i] + y[iy] += alpha * sum + iy += incY + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + atmp := a[offset-i : offset] + jx := kx + jy := ky + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + + sum += a[offset] * x[ix] + y[iy] += alpha * sum + ix += incX + iy += incY + offset += i + 2 + } +} + +// Dspr performs the symmetric rank-one operation +// A += alpha * x * x^T +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +func (Implementation) Dspr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if len(a) < (n*(n+1))/2 { + panic(badLdA) + } + if alpha == 0 || n == 0 { + return + } + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + var offset int // Offset is the index of (i,i). 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + atmp := a[offset:] + xv := alpha * x[i] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + i*incX + atmp := a[offset:] + xv := alpha * x[ix] + for j := 0; j < n-i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := a[offset-i:] + xv := alpha * x[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := a[offset-i:] + xv := alpha * x[ix] + for j := 0; j <= i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += i + 2 + } +} + +// Dspr2 performs the symmetric rank-2 update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func (Implementation) Dspr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, ap []float64) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if len(ap) < (n*(n+1))/2 { + panic(badLdA) + } + if alpha == 0 { + return + } + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xi := x[i] + yi := y[i] + xtmp := x[i:n] + ytmp := y[i:n] + for j, v := range xtmp { + atmp[j] += alpha * (xi*ytmp[j] + v*yi) + } + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + atmp := ap[offset:] + xi := x[ix] + yi := y[iy] + for j := 0; j < n-i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xi := x[i] + yi := y[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += alpha * (xi*y[j] + v*yi) + } + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + atmp := ap[offset-i:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += i + 2 + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2single.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2single.go new file mode 100644 index 00000000..7bc8b0d4 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level2single.go @@ -0,0 +1,2102 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level2 = Implementation{} + +// Sger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { + // Check inputs + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (m-1)*incX >= len(x)) || (incX < 0 && (1-m)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(m-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + + // Quick return if possible + if m == 0 || n == 0 || alpha == 0 { + return + } + f32.Ger(uintptr(m), uintptr(n), + alpha, + x, uintptr(incX), + y, uintptr(incY), + a, uintptr(lda)) +} + +// Sgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans +// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, +// x and y are vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(badY) + } + if lda*(min(m, n+kL)-1)+kL+kU+1 > len(a) || lda < kL+kU+1 { + panic(badLdA) + } + + // Quick return if possible + if m == 0 || n == 0 || (alpha == 0 && beta == 1) { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // First form y = beta * y + if incY > 0 { + Implementation{}.Sscal(lenY, beta, y, incY) + } else { + Implementation{}.Sscal(lenY, beta, y, -incY) + } + + if alpha == 0 { + return + } + + // i and j are indices of the compacted banded matrix. 
+ // off is the offset into the dense matrix (off + j = densej) + nCol := kU + 1 + kL + if tA == blas.NoTrans { + iy := ky + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + xtmp := x[off : off+u-l] + var sum float32 + for j, v := range atmp { + sum += xtmp[j] * v + } + y[iy] += sum * alpha + iy += incY + } + return + } + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + jx := kx + var sum float32 + for _, v := range atmp { + sum += x[off*incX+jx] * v + jx += incX + } + y[iy] += sum * alpha + iy += incY + } + return + } + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[i] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + } + return + } + ix := kx + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[ix] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + ix += incX + } +} + +// Strmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Strmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < n { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if n == 0 { + return + } + nonUnit := d != blas.Unit + if n == 1 { + if nonUnit { + x[0] *= a[0] + } + return + } + var kx int + if incX <= 0 { + kx = -(n - 1) * incX + } + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[i] + } else { + tmp = x[i] + } + xtmp := x[i+1:] + x[i] = tmp + f32.DotUnitary(a[ilda+i+1:ilda+n], xtmp) + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f32.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp += a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f32.DotUnitary(a[ilda:ilda+i], x) + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f32.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + ix -= incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[i] + f32.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) + if nonUnit { + x[i] *= a[ilda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[ix] + f32.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[i] + f32.AxpyUnitary(xi, a[ilda:ilda+i], x) + if nonUnit { + x[i] *= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[ix] + f32.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix += incX + } +} + +// Strsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Strsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { + // Test the input parameters + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + // Quick return if possible + if n == 0 { + return + } + if n == 1 { + if d == blas.NonUnit { + x[0] /= a[0] + } + return + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + var sum float32 + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := i + j + 1 + sum += x[jv] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + var sum float32 + jx := ix + incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + var sum float32 + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + sum += x[j] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + var sum float32 + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := j + i + 1 + x[jv] -= v * xi + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + (i+1)*incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + x[j] -= v * xi + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix -= incX + } +} + +// Ssymv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssymv(ul blas.Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + // Check inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda > 1 && lda < n { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + // Quick return if possible + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY > 0 { + Implementation{}.Sscal(n, beta, y, incY) + } else { + Implementation{}.Sscal(n, beta, y, -incY) + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := x[i] * a[i*lda+i] + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jp := j + i + 1 + sum += x[jp] * v + y[jy] += xv * v + jy += incY + } + y[iy] += alpha * sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := x[ix] * a[i*lda+i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + } + return + } + // Cases where a is lower triangular. 
+ if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + jy := ky + xv := alpha * x[i] + atmp := a[i*lda : i*lda+i] + var sum float32 + for j, v := range atmp { + sum += x[j] * v + y[jy] += xv * v + jy += incY + } + sum += x[i] * a[i*lda+i] + sum *= alpha + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xv := alpha * x[ix] + atmp := a[i*lda : i*lda+i] + var sum float32 + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + sum += x[ix] * a[i*lda+i] + sum *= alpha + y[iy] += sum + ix += incX + iy += incY + } +} + +// Stbmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda*(n-1)+k+1 > len(a) || lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonunit := d != blas.Unit + + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float32 + atmp := a[i*lda:] + xtmp := x[i:] + for j := 1; j < u; j++ { + sum += xtmp[j] * atmp[j] + } + if nonunit { + sum += xtmp[0] * atmp[0] + } else { + sum += xtmp[0] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float32 + atmp := a[i*lda:] + jx := incX + for j := 1; j < u; j++ { + sum += x[ix+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[0] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float32 + for j := l; j < k; j++ { + sum += x[i-k+j] * atmp[j] + } + if nonunit { + sum += x[i] * atmp[k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float32 + jx := l * incX + for j := l; j < k; j++ { + sum += x[ix-k*incX+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[k] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float32 + for j := 1; j < u; j++ { + sum += x[i-j] * a[(i-j)*lda+j] + } + if nonunit { + sum += x[i] * a[i*lda] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float32 + jx := incX + for j := 1; j < u; j++ { + sum += x[ix-jx] * a[(i-j)*lda+j] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + u := k + 
if i+k >= n { + u = n - i - 1 + } + var sum float32 + for j := 0; j < u; j++ { + sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] + } + if nonunit { + sum += x[i] * a[i*lda+k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var ( + sum float32 + jx int + ) + for j := 0; j < u; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda+k] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } +} + +// Stpmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if len(ap) < (n*(n+1))/2 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xi += v * xtmp[j] + } + x[i] = xi + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset += n - i + ix += incX + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset-i : offset] + for j, v := range atmp { + xi += v * x[j] + } + x[i] = xi + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset -= i + 1 + ix -= incX + } + return + } + // Cases where ap is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + jx := kx + (i+1)*incX + atmp := ap[offset+1 : offset+n-i] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + offset -= n - i + 1 + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + jx := kx + atmp := ap[offset-i : offset] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + ix += incX + offset += i + 2 + } +} + +// Stbsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, +// and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda*(n-1)+k+1 > len(a) || lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + // Form x = A^-1 x. + // Several cases below use subslices for speed improvement. + // The incX != 1 cases usually do not because incX may be negative. 
+ if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + atmp := a[i*lda+1:] + xtmp := x[i+1 : i+bands+1] + var sum float32 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + max := k + 1 + if i+max > n { + max = n - i + } + atmp := a[i*lda:] + var ( + jx int + sum float32 + ) + for j := 1; j < max; j++ { + jx += incX + sum += x[ix+jx] * atmp[j] + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[0] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + xtmp := x[i-bands : i] + var sum float32 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= atmp[bands] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * atmp[j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[bands] + } + ix += incX + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var sum float32 + for j := 0; j < bands; j++ { + sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda] + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var sum float32 + xtmp := x[i+1 : i+1+bands] + for j, v := range xtmp { + sum += v * a[(i+j+1)*lda+k-j-1] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+k] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+k] + } + ix -= incX + } +} + +// Ssbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric band matrix with k super-diagonals, x and y are +// vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Ssbmv(ul blas.Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(n-1)+k+1 > len(a) || lda < k+1 { + panic(badLdA) + } + + // Quick return if possible + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up indexes + lenX := n + lenY := n + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // First form y = beta * y + if incY > 0 { + Implementation{}.Sscal(lenY, beta, y, incY) + } else { + Implementation{}.Sscal(lenY, beta, y, -incY) + } + + if alpha == 0 { + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[i] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[i+j] * v + y[iy+jy] += tmp * v + jy += incY + } + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[ix] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jx := incX + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + return + } + + // Cases where a has bands below the diagonal. + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[i] + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += tmp * v + jy += incY + } + y[iy] += tmp * atmp[k] + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[ix] + jx := l * incX + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += tmp * atmp[k] + ix += incX + iy += incY + } +} + +// Ssyr performs the symmetric rank-one update +// A += alpha * x * x^T +// where A is an n×n symmetric matrix, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. 
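[Editor's note] A minimal sketch of the Ssyr rank-one update defined below — an illustration added for review, not part of the vendored diff; it assumes the gonum.org/v1/gonum/blas/gonum import path. With blas.Upper only the diagonal and above are written:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// A += 2 * x * x^T on a 2×2 identity; the stored lower triangle
	// (a[2] here) is left untouched by the blas.Upper variant.
	a := []float32{1, 0, 0, 1}
	x := []float32{1, 2}
	var impl gonum.Implementation
	impl.Ssyr(blas.Upper, 2, 2, x, 1, a, 2)
	fmt.Println(a) // [3 4 0 9]
}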
+func (Implementation) Ssyr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if alpha == 0 || n == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda+i : i*lda+n] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += v * tmp + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + jx := ix + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += x[jx] * tmp + jx += incX + } + } + ix += incX + } + return + } + // Cases where a is lower triangular. + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda:] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += tmp * v + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + atmp := a[i*lda:] + jx := kx + for j := 0; j < i+1; j++ { + atmp[j] += tmp * x[jx] + jx += incX + } + } + ix += incX + } +} + +// Ssyr2 performs the symmetric rank-two update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssyr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if lda*(n-1)+n > len(a) || lda < max(1, n) { + panic(badLdA) + } + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } +} + +// Stpsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x and b are vectors. 
+// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if len(ap) < (n*(n+1))/2 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if n == 0 { + return + } + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + offset = n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + var sum float32 + for j, v := range atmp { + sum += v * xtmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix -= incX + offset -= n - i + 1 + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i : offset] + var sum float32 + for j, v := range atmp { + sum += v * x[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i : offset] + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix += incX + offset += i + 2 + } + return + } + // Cases where ap is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] -= v * xi + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] -= v * xi + } + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix -= incX + offset -= i + 1 + } +} + +// Sspmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. 
+// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspmv(ul blas.Uplo, n int, alpha float32, a []float32, x []float32, incX int, beta float32, y []float32, incY int) { + // Verify inputs + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if len(a) < (n*(n+1))/2 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + // Quick return if possible + if n == 0 || (alpha == 0 && beta == 1) { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY > 0 { + Implementation{}.Sscal(n, beta, y, incY) + } else { + Implementation{}.Sscal(n, beta, y, -incY) + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := a[offset] * x[i] + atmp := a[offset+1 : offset+n-i] + xtmp := x[i+1:] + jy := ky + (i+1)*incY + for j, v := range atmp { + sum += v * xtmp[j] + y[jy] += v * xv + jy += incY + } + y[iy] += alpha * sum + iy += incY + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := a[offset] * x[ix] + atmp := a[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + atmp := a[offset-i : offset] + jy := ky + var sum float32 + for j, v := range atmp { + sum += v * x[j] + y[jy] += v * xv + jy += incY + } + sum += a[offset] * x[i] + y[iy] += alpha * sum + iy += incY + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + atmp := a[offset-i : offset] + jx := kx + jy := ky + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + + sum += a[offset] * x[ix] + y[iy] += alpha * sum + ix += incX + iy += incY + offset += i + 2 + } +} + +// Sspr performs the symmetric rank-one operation +// A += alpha * x * x^T +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, a []float32) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if len(a) < (n*(n+1))/2 { + panic(badLdA) + } + if alpha == 0 || n == 0 { + return + } + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + var offset int // Offset is the index of (i,i). 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + atmp := a[offset:] + xv := alpha * x[i] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + i*incX + atmp := a[offset:] + xv := alpha * x[ix] + for j := 0; j < n-i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := a[offset-i:] + xv := alpha * x[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := a[offset-i:] + xv := alpha * x[ix] + for j := 0; j <= i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += i + 2 + } +} + +// Sspr2 performs the symmetric rank-2 update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, ap []float32) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(badX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(badY) + } + if len(ap) < (n*(n+1))/2 { + panic(badLdA) + } + if alpha == 0 { + return + } + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xi := x[i] + yi := y[i] + xtmp := x[i:n] + ytmp := y[i:n] + for j, v := range xtmp { + atmp[j] += alpha * (xi*ytmp[j] + v*yi) + } + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + atmp := ap[offset:] + xi := x[ix] + yi := y[iy] + for j := 0; j < n-i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xi := x[i] + yi := y[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += alpha * (xi*y[j] + v*yi) + } + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + atmp := ap[offset-i:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += i + 2 + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level3double.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level3double.go new file mode 100644 index 00000000..39e754d0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level3double.go @@ -0,0 +1,833 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level3 = Implementation{} + +// Dtrsm solves one of the matrix equations +// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left +// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right +// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a +// scalar. +// +// At entry to the function, X contains the values of B, and the result is +// stored in-place into X. +// +// No check is made that A is invertible. +func (Implementation) Dtrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if ldb < n { + panic(badLdB) + } + var k int + if s == blas.Left { + k = m + } else { + k = n + } + if lda*(k-1)+k > len(a) || lda < max(1, k) { + panic(badLdA) + } + if ldb*(m-1)+n > len(b) || ldb < max(1, n) { + panic(badLdB) + } + + if m == 0 || n == 0 { + return + } + + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := m - 1; i >= 0; i-- { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := range btmp { + btmp[j] *= alpha + } + } + for ka, va := range a[i*lda+i+1 : i*lda+m] { + k := ka + i + 1 + if va != 0 { + f64.AxpyUnitaryTo(btmp, -va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + for j := 0; j < n; j++ { + btmp[j] *= tmp + } + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := 0; j < n; j++ { + btmp[j] *= alpha + } + } + for k, va := range a[i*lda : i*lda+i] { + if va != 0 { + f64.AxpyUnitaryTo(btmp, -va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + for j := 0; j < n; j++ { + btmp[j] *= tmp + } + } + } + return + } + // Cases where a is transposed + if ul == blas.Upper { + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + for ia, va := range a[k*lda+k+1 : k*lda+m] { + i := ia + k + 1 + if va != 0 { + btmp := b[i*ldb : i*ldb+n] + f64.AxpyUnitaryTo(btmp, -va, btmpk, btmp) + } + } + if alpha != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= alpha + } + } + } + return + } + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + for i, va := range a[k*lda : k*lda+k] { + if va != 0 { + btmp := b[i*ldb : i*ldb+n] + f64.AxpyUnitaryTo(btmp, -va, btmpk, btmp) + } + } + if alpha != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= alpha + } + } + } + return + } + // Cases where a is to the right of X. 
+ if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := 0; j < n; j++ { + btmp[j] *= alpha + } + } + for k, vb := range btmp { + if vb != 0 { + if nonUnit { + btmp[k] /= a[k*lda+k] + } + btmpk := btmp[k+1 : n] + f64.AxpyUnitaryTo(btmpk, -btmp[k], a[k*lda+k+1:k*lda+n], btmpk) + } + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := 0; j < n; j++ { + btmp[j] *= alpha + } + } + for k := n - 1; k >= 0; k-- { + if btmp[k] != 0 { + if nonUnit { + btmp[k] /= a[k*lda+k] + } + f64.AxpyUnitaryTo(btmp, -btmp[k], a[k*lda:k*lda+k], btmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := 0; j < n; j++ { + tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda:j*lda+j], btmp) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } +} + +// Dsymm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C if side == blas.Left +// C = alpha * B * A + beta * C if side == blas.Right +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha +// and beta are scalars. +func (Implementation) Dsymm(s blas.Side, ul blas.Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + if s != blas.Right && s != blas.Left { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + var k int + if s == blas.Left { + k = m + } else { + k = n + } + if lda*(k-1)+k > len(a) || lda < max(1, k) { + panic(badLdA) + } + if ldb*(m-1)+n > len(b) || ldb < max(1, n) { + panic(badLdB) + } + if ldc*(m-1)+n > len(c) || ldc < max(1, n) { + panic(badLdC) + } + if m == 0 || n == 0 { + return + } + if alpha == 0 && beta == 1 { + return + } + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] *= beta + } + } + return + } + + isUpper := ul == blas.Upper + if s == blas.Left { + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + btmp := b[i*ldb : i*ldb+n] + ctmp := c[i*ldc : i*ldc+n] + for j, v := range btmp { + ctmp[j] *= beta + ctmp[j] += atmp * v + } + + for k := 0; k < i; k++ { + var atmp float64 + if isUpper { + atmp = a[k*lda+i] + } else { + atmp = a[i*lda+k] + } + atmp *= alpha + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyUnitaryTo(ctmp, atmp, b[k*ldb:k*ldb+n], ctmp) + } + for k := i + 1; k < m; k++ { + var atmp float64 + if isUpper { + atmp = a[i*lda+k] + } else { + atmp = a[k*lda+i] + } + atmp *= alpha + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyUnitaryTo(ctmp, atmp, b[k*ldb:k*ldb+n], ctmp) + } + } + return + } + if isUpper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + tmp := alpha * b[i*ldb+j] + var tmp2 float64 + atmp := a[j*lda+j+1 : j*lda+n] + btmp := b[i*ldb+j+1 : i*ldb+n] + ctmp := c[i*ldc+j+1 : i*ldc+n] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } + return + } + 
for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + tmp := alpha * b[i*ldb+j] + var tmp2 float64 + atmp := a[j*lda : j*lda+j] + btmp := b[i*ldb : i*ldb+j] + ctmp := c[i*ldc : i*ldc+j] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } +} + +// Dsyrk performs one of the symmetric rank-k operations +// C = alpha * A * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and +// beta are scalars. +func (Implementation) Dsyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if ldc < n { + panic(badLdC) + } + var row, col int + if tA == blas.NoTrans { + row, col = n, k + } else { + row, col = k, n + } + if lda*(row-1)+col > len(a) || lda < max(1, col) { + panic(badLdA) + } + if ldc*(n-1)+n > len(c) || ldc < max(1, n) { + panic(badLdC) + } + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + atmp := a[i*lda : i*lda+k] + for jc, vc := range ctmp { + j := jc + i + ctmp[jc] = vc*beta + alpha*f64.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } + return + } + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + for j, vc := range c[i*ldc : i*ldc+i+1] { + c[i*ldc+j] = vc*beta + alpha*f64.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f64.AxpyUnitaryTo(ctmp, tmp, a[l*lda+i:l*lda+n], ctmp) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f64.AxpyUnitaryTo(ctmp, tmp, a[l*lda:l*lda+i+1], ctmp) + } + } + } +} + +// Dsyr2k performs one of the symmetric rank 2k operations +// C = alpha * A * B^T + alpha * B * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * B + alpha * B^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and +// alpha and beta are scalars. 
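As a usage sketch for Dsyrk above (the rank-2k routine Dsyr2k continues below) — illustrative only and not part of the vendored file — computing C = A * A^T into the upper triangle:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	// A is 2×3 row-major (n=2, k=3, lda=3).
	a := []float64{
		1, 2, 3,
		4, 5, 6,
	}
	// C is 2×2 with ldc=2; with blas.Upper only the upper triangle is written.
	c := make([]float64, 4)
	impl.Dsyrk(blas.Upper, blas.NoTrans, 2, 3, 1, a, 3, 0, c, 2)
	fmt.Println(c) // [14 32 0 77]: the upper triangle of A*A^T.
}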
+func (Implementation) Dsyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if ldc < n { + panic(badLdC) + } + var row, col int + if tA == blas.NoTrans { + row, col = n, k + } else { + row, col = k, n + } + if lda*(row-1)+col > len(a) || lda < max(1, col) { + panic(badLdA) + } + if ldb*(row-1)+col > len(b) || ldb < max(1, col) { + panic(badLdB) + } + if ldc*(n-1)+n > len(c) || ldc < max(1, n) { + panic(badLdC) + } + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc+i : i*ldc+n] + for jc := range ctmp { + j := i + jc + var tmp1, tmp2 float64 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[jc] *= beta + ctmp[jc] += alpha * (tmp1 + tmp2) + } + } + return + } + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc : i*ldc+i+1] + for j := 0; j <= i; j++ { + var tmp1, tmp2 float64 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[j] *= beta + ctmp[j] += alpha * (tmp1 + tmp2) + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb+i : l*ldb+n] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda+i : l*lda+n] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb : l*ldb+i+1] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda : l*lda+i+1] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } +} + +// Dtrmm performs one of the matrix-matrix operations +// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left +// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right +// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. 
+func (Implementation) Dtrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + var k int + if s == blas.Left { + k = m + } else { + k = n + } + if lda*(k-1)+k > len(a) || lda < max(1, k) { + panic(badLdA) + } + if ldb*(m-1)+n > len(b) || ldb < max(1, n) { + panic(badLdB) + } + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] *= tmp + } + for ka, va := range a[i*lda+i+1 : i*lda+m] { + k := ka + i + 1 + tmp := alpha * va + if tmp != 0 { + f64.AxpyUnitaryTo(btmp, tmp, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + for i := m - 1; i >= 0; i-- { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] *= tmp + } + for k, va := range a[i*lda : i*lda+i] { + tmp := alpha * va + if tmp != 0 { + f64.AxpyUnitaryTo(btmp, tmp, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + for ia, va := range a[k*lda+k+1 : k*lda+m] { + i := ia + k + 1 + btmp := b[i*ldb : i*ldb+n] + tmp := alpha * va + if tmp != 0 { + f64.AxpyUnitaryTo(btmp, tmp, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + } + return + } + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + for i, va := range a[k*lda : k*lda+k] { + btmp := b[i*ldb : i*ldb+n] + tmp := alpha * va + if tmp != 0 { + f64.AxpyUnitaryTo(btmp, tmp, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + } + return + } + // Cases where a is on the right + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + tmp := alpha * btmp[k] + if tmp != 0 { + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + for ja, v := range a[k*lda+k+1 : k*lda+n] { + j := ja + k + 1 + btmp[j] += tmp * v + } + } + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + tmp := alpha * btmp[k] + if tmp != 0 { + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f64.AxpyUnitaryTo(btmp, tmp, a[k*lda:k*lda+k], btmp) + } + } + } + return + } + // Cases where a is transposed. 
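+ // For B = alpha * B * A^T, entry (i,j) of the product is the dot product
+ // of row i of B with row j of A; triangularity of A restricts each dot to
+ // a suffix (upper) or a prefix (lower) of the row, which is what the
+ // DotUnitary calls below exploit.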
+ if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j, vb := range btmp { + tmp := vb + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) + btmp[j] = alpha * tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := btmp[j] + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f64.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) + btmp[j] = alpha * tmp + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level3single.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level3single.go new file mode 100644 index 00000000..d24ce78c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/level3single.go @@ -0,0 +1,845 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level3 = Implementation{} + +// Strsm solves one of the matrix equations +// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left +// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right +// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a +// scalar. +// +// At entry to the function, X contains the values of B, and the result is +// stored in-place into X. +// +// No check is made that A is invertible. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Strsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if ldb < n { + panic(badLdB) + } + var k int + if s == blas.Left { + k = m + } else { + k = n + } + if lda*(k-1)+k > len(a) || lda < max(1, k) { + panic(badLdA) + } + if ldb*(m-1)+n > len(b) || ldb < max(1, n) { + panic(badLdB) + } + + if m == 0 || n == 0 { + return + } + + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := m - 1; i >= 0; i-- { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := range btmp { + btmp[j] *= alpha + } + } + for ka, va := range a[i*lda+i+1 : i*lda+m] { + k := ka + i + 1 + if va != 0 { + f32.AxpyUnitaryTo(btmp, -va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + for j := 0; j < n; j++ { + btmp[j] *= tmp + } + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := 0; j < n; j++ { + btmp[j] *= alpha + } + } + for k, va := range a[i*lda : i*lda+i] { + if va != 0 { + f32.AxpyUnitaryTo(btmp, -va, b[k*ldb:k*ldb+n], btmp) + } + } + if nonUnit { + tmp := 1 / a[i*lda+i] + for j := 0; j < n; j++ { + btmp[j] *= tmp + } + } + } + return + } + // Cases where a is transposed + if ul == blas.Upper { + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + for ia, va := range a[k*lda+k+1 : k*lda+m] { + i := ia + k + 1 + if va != 0 { + btmp := b[i*ldb : i*ldb+n] + f32.AxpyUnitaryTo(btmp, -va, btmpk, btmp) + } + } + if alpha != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= alpha + } + } + } + return + } + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + if nonUnit { + tmp := 1 / a[k*lda+k] + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + for i, va := range a[k*lda : k*lda+k] { + if va != 0 { + btmp := b[i*ldb : i*ldb+n] + f32.AxpyUnitaryTo(btmp, -va, btmpk, btmp) + } + } + if alpha != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= alpha + } + } + } + return + } + // Cases where a is to the right of X. + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := 0; j < n; j++ { + btmp[j] *= alpha + } + } + for k, vb := range btmp { + if vb != 0 { + if nonUnit { + btmp[k] /= a[k*lda+k] + } + btmpk := btmp[k+1 : n] + f32.AxpyUnitaryTo(btmpk, -btmp[k], a[k*lda+k+1:k*lda+n], btmpk) + } + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + if alpha != 1 { + for j := 0; j < n; j++ { + btmp[j] *= alpha + } + } + for k := n - 1; k >= 0; k-- { + if btmp[k] != 0 { + if nonUnit { + btmp[k] /= a[k*lda+k] + } + f32.AxpyUnitaryTo(btmp, -btmp[k], a[k*lda:k*lda+k], btmp) + } + } + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := 0; j < n; j++ { + tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda:j*lda+j], btmp) + if nonUnit { + tmp /= a[j*lda+j] + } + btmp[j] = tmp + } + } +} + +// Ssymm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C if side == blas.Left +// C = alpha * B * A + beta * C if side == blas.Right +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha +// and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssymm(s blas.Side, ul blas.Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { + if s != blas.Right && s != blas.Left { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + var k int + if s == blas.Left { + k = m + } else { + k = n + } + if lda*(k-1)+k > len(a) || lda < max(1, k) { + panic(badLdA) + } + if ldb*(m-1)+n > len(b) || ldb < max(1, n) { + panic(badLdB) + } + if ldc*(m-1)+n > len(c) || ldc < max(1, n) { + panic(badLdC) + } + if m == 0 || n == 0 { + return + } + if alpha == 0 && beta == 1 { + return + } + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] *= beta + } + } + return + } + + isUpper := ul == blas.Upper + if s == blas.Left { + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + btmp := b[i*ldb : i*ldb+n] + ctmp := c[i*ldc : i*ldc+n] + for j, v := range btmp { + ctmp[j] *= beta + ctmp[j] += atmp * v + } + + for k := 0; k < i; k++ { + var atmp float32 + if isUpper { + atmp = a[k*lda+i] + } else { + atmp = a[i*lda+k] + } + atmp *= alpha + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyUnitaryTo(ctmp, atmp, b[k*ldb:k*ldb+n], ctmp) + } + for k := i + 1; k < m; k++ { + var atmp float32 + if isUpper { + atmp = a[i*lda+k] + } else { + atmp = a[k*lda+i] + } + atmp *= alpha + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyUnitaryTo(ctmp, atmp, b[k*ldb:k*ldb+n], ctmp) + } + } + return + } + if isUpper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + tmp := alpha * b[i*ldb+j] + var tmp2 float32 + atmp := a[j*lda+j+1 : j*lda+n] + btmp := b[i*ldb+j+1 : i*ldb+n] + ctmp := c[i*ldc+j+1 : i*ldc+n] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } + return + } + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + tmp := alpha * b[i*ldb+j] + var tmp2 float32 + atmp := a[j*lda : j*lda+j] + btmp := b[i*ldb : i*ldb+j] + ctmp := c[i*ldc : i*ldc+j] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } +} + +// Ssyrk performs one of the symmetric rank-k operations +// C = alpha * A * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and +// beta are scalars. 
+// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if ldc < n { + panic(badLdC) + } + var row, col int + if tA == blas.NoTrans { + row, col = n, k + } else { + row, col = k, n + } + if lda*(row-1)+col > len(a) || lda < max(1, col) { + panic(badLdA) + } + if ldc*(n-1)+n > len(c) || ldc < max(1, n) { + panic(badLdC) + } + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + atmp := a[i*lda : i*lda+k] + for jc, vc := range ctmp { + j := jc + i + ctmp[jc] = vc*beta + alpha*f32.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } + return + } + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + for j, vc := range c[i*ldc : i*ldc+i+1] { + c[i*ldc+j] = vc*beta + alpha*f32.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f32.AxpyUnitaryTo(ctmp, tmp, a[l*lda+i:l*lda+n], ctmp) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f32.AxpyUnitaryTo(ctmp, tmp, a[l*lda:l*lda+i+1], ctmp) + } + } + } +} + +// Ssyr2k performs one of the symmetric rank 2k operations +// C = alpha * A * B^T + alpha * B * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * B + alpha * B^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and +// alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Ssyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if ldc < n { + panic(badLdC) + } + var row, col int + if tA == blas.NoTrans { + row, col = n, k + } else { + row, col = k, n + } + if lda*(row-1)+col > len(a) || lda < max(1, col) { + panic(badLdA) + } + if ldb*(row-1)+col > len(b) || ldb < max(1, col) { + panic(badLdB) + } + if ldc*(n-1)+n > len(c) || ldc < max(1, n) { + panic(badLdC) + } + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc+i : i*ldc+n] + for jc := range ctmp { + j := i + jc + var tmp1, tmp2 float32 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[jc] *= beta + ctmp[jc] += alpha * (tmp1 + tmp2) + } + } + return + } + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc : i*ldc+i+1] + for j := 0; j <= i; j++ { + var tmp1, tmp2 float32 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[j] *= beta + ctmp[j] += alpha * (tmp1 + tmp2) + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb+i : l*ldb+n] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda+i : l*lda+n] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb : l*ldb+i+1] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda : l*lda+i+1] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } +} + +// Strmm performs one of the matrix-matrix operations +// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left +// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right +// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Strmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + var k int + if s == blas.Left { + k = m + } else { + k = n + } + if lda*(k-1)+k > len(a) || lda < max(1, k) { + panic(badLdA) + } + if ldb*(m-1)+n > len(b) || ldb < max(1, n) { + panic(badLdB) + } + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] *= tmp + } + for ka, va := range a[i*lda+i+1 : i*lda+m] { + k := ka + i + 1 + tmp := alpha * va + if tmp != 0 { + f32.AxpyUnitaryTo(btmp, tmp, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + for i := m - 1; i >= 0; i-- { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] *= tmp + } + for k, va := range a[i*lda : i*lda+i] { + tmp := alpha * va + if tmp != 0 { + f32.AxpyUnitaryTo(btmp, tmp, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + for ia, va := range a[k*lda+k+1 : k*lda+m] { + i := ia + k + 1 + btmp := b[i*ldb : i*ldb+n] + tmp := alpha * va + if tmp != 0 { + f32.AxpyUnitaryTo(btmp, tmp, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + } + return + } + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + for i, va := range a[k*lda : k*lda+k] { + btmp := b[i*ldb : i*ldb+n] + tmp := alpha * va + if tmp != 0 { + f32.AxpyUnitaryTo(btmp, tmp, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + for j := 0; j < n; j++ { + btmpk[j] *= tmp + } + } + } + return + } + // Cases where a is on the right + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + tmp := alpha * btmp[k] + if tmp != 0 { + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + for ja, v := range a[k*lda+k+1 : k*lda+n] { + j := ja + k + 1 + btmp[j] += tmp * v + } + } + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + tmp := alpha * btmp[k] + if tmp != 0 { + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f32.AxpyUnitaryTo(btmp, tmp, a[k*lda:k*lda+k], btmp) + } + } + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j, vb := range btmp { + tmp := vb + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) + btmp[j] = alpha * tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := btmp[j] + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f32.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) + btmp[j] = alpha * tmp + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go new file mode 100644 index 00000000..24a8b7ed --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go @@ -0,0 +1,269 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "runtime" + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sgemm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C +// C = alpha * A^T * B + beta * C +// C = alpha * A * B^T + beta * C +// C = alpha * A^T * B^T + beta * C +// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is +// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or +// B are transposed. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sgemm(tA, tB blas.Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if tB != blas.NoTrans && tB != blas.Trans && tB != blas.ConjTrans { + panic(badTranspose) + } + aTrans := tA == blas.Trans || tA == blas.ConjTrans + if aTrans { + checkSMatrix('a', k, m, a, lda) + } else { + checkSMatrix('a', m, k, a, lda) + } + bTrans := tB == blas.Trans || tB == blas.ConjTrans + if bTrans { + checkSMatrix('b', n, k, b, ldb) + } else { + checkSMatrix('b', k, n, b, ldb) + } + checkSMatrix('c', m, n, c, ldc) + + // scale c + if beta != 1 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + } + } + + sgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) +} + +func sgemmParallel(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // sgemmParallel computes a parallel matrix multiplication by partitioning + // a and b into sub-blocks, and updating c with the multiplication of the sub-blocks. + // In all cases, + // A = [ A_11 A_12 ... A_1j + // A_21 A_22 ... A_2j + // ... + // A_i1 A_i2 ... A_ij] + // + // and same for B. All of the submatrix sizes are blockSize×blockSize except + // at the edges. + // + // In all cases, there is one dimension for each matrix along which + // C must be updated sequentially. 
+ // Cij = \sum_k Aik Bkj, (A * B) + // Cij = \sum_k Aki Bkj, (A^T * B) + // Cij = \sum_k Aik Bjk, (A * B^T) + // Cij = \sum_k Aki Bjk, (A^T * B^T) + // + // This code computes one {i, j} block sequentially along the k dimension, + // and computes all of the {i, j} blocks concurrently. This + // partitioning allows Cij to be updated in-place without race conditions. + // Instead of launching a goroutine for each possible concurrent computation, + // a number of worker goroutines are created and channels are used to pass + // available and completed cases. + // + // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix + // multiplies, though this code does not copy matrices to attempt to eliminate + // cache misses. + + maxKLen := k + parBlocks := blocks(m, blockSize) * blocks(n, blockSize) + if parBlocks < minParBlock { + // The matrix multiplication is small in the dimensions where it can be + // computed concurrently. Just do it in serial. + sgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + } + + nWorkers := runtime.GOMAXPROCS(0) + if parBlocks < nWorkers { + nWorkers = parBlocks + } + // There is a tradeoff between the workers having to wait for work + // and a large buffer making operations slow. + buf := buffMul * nWorkers + if buf > parBlocks { + buf = parBlocks + } + + sendChan := make(chan subMul, buf) + + // Launch workers. A worker receives an {i, j} submatrix of c, and computes + // A_ik B_kj (or the transposed version) storing the result in c_ij. When the + // channel is finally closed, it signals to the waitgroup that it has finished + // computing. + var wg sync.WaitGroup + for i := 0; i < nWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // Make local copies of otherwise global variables to reduce shared memory. + // This has a noticeable effect on benchmarks in some cases. + alpha := alpha + aTrans := aTrans + bTrans := bTrans + m := m + n := n + for sub := range sendChan { + i := sub.i + j := sub.j + leni := blockSize + if i+leni > m { + leni = m - i + } + lenj := blockSize + if j+lenj > n { + lenj = n - j + } + + cSub := sliceView32(c, ldc, i, j, leni, lenj) + + // Compute A_ik B_kj for all k + for k := 0; k < maxKLen; k += blockSize { + lenk := blockSize + if k+lenk > maxKLen { + lenk = maxKLen - k + } + var aSub, bSub []float32 + if aTrans { + aSub = sliceView32(a, lda, k, i, lenk, leni) + } else { + aSub = sliceView32(a, lda, i, k, leni, lenk) + } + if bTrans { + bSub = sliceView32(b, ldb, j, k, lenj, lenk) + } else { + bSub = sliceView32(b, ldb, k, j, lenk, lenj) + } + sgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) + } + } + }() + } + + // Send out all of the {i, j} subblocks for computation. 
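+ // Every {i, j} block index is sent exactly once, for a total of
+ // blocks(m, blockSize)*blocks(n, blockSize) messages; closing the channel
+ // lets the workers drain it and exit, and wg.Wait below blocks until all
+ // subblocks of c have been written.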
+ for i := 0; i < m; i += blockSize { + for j := 0; j < n; j += blockSize { + sendChan <- subMul{ + i: i, + j: j, + } + } + } + close(sendChan) + wg.Wait() +} + +// sgemmSerial is a serial matrix multiply +func sgemmSerial(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + switch { + case !aTrans && !bTrans: + sgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && !bTrans: + sgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case !aTrans && bTrans: + sgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && bTrans: + sgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + default: + panic("unreachable") + } +} + +// sgemmSerial where neither a nor b is transposed +func sgemmSerialNotNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride+j] because it is + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for l, v := range a[i*lda : i*lda+k] { + tmp := alpha * v + if tmp != 0 { + f32.AxpyUnitaryTo(ctmp, tmp, b[l*ldb:l*ldb+n], ctmp) + } + } + } +} + +// sgemmSerial where a is transposed and b is not +func sgemmSerialTransNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride+j] because it is + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + btmp := b[l*ldb : l*ldb+n] + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyUnitaryTo(ctmp, tmp, btmp, ctmp) + } + } + } +} + +// sgemmSerial where a is not transposed and b is +func sgemmSerialNotTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride+j] because it is + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + atmp := a[i*lda : i*lda+k] + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] += alpha * f32.DotUnitary(atmp, b[j*ldb:j*ldb+k]) + } + } +} + +// sgemmSerial where both a and b are transposed +func sgemmSerialTransTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride+j] because it is + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) + } + } + } +} + +func sliceView32(a []float32, lda, i, j, r, c int) []float32 { + return a[i*lda+j : (i+r-1)*lda+j+c] +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash new file mode 100755 index 00000000..00d1b882 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash @@ -0,0 +1,145 @@ +#!/usr/bin/env bash + +# Copyright ©2015 The Gonum Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +WARNING='//\ +// Float32 implementations are autogenerated and not directly tested.\ +' + +# Level1 routines. + +echo Generating level1single.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1single.go +cat level1double.go \ +| gofmt -r 'blas.Float64Level1 -> blas.Float32Level1' \ +\ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'blas.DrotmParams -> blas.SrotmParams' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyIncTo -> f32.AxpyIncTo' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.ScalInc -> f32.ScalInc' \ +| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ + -e 's_^// D_// S_' \ + -e "s_^\(func (Implementation) \)Id\(.*\)\$_$WARNING\1Is\2_" \ + -e 's_^// Id_// Is_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +>> level1single.go + +echo Generating level1single_sdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1single_sdot.go +cat level1double_ddot.go \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.DotInc -> f32.DotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1single_sdot.go + +echo Generating level1single_dsdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1single_dsdot.go +cat level1double_ddot.go \ +| gofmt -r '[]float64 -> []float32' \ +\ +| gofmt -r 'f64.DotInc -> f32.DdotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DdotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1Ds\2_" \ + -e 's_^// D_// Ds_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1single_dsdot.go + +echo Generating level1single_sdsdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1single_sdsdot.go +cat level1double_ddot.go \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.DotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)) -> alpha + float32(f32.DdotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)))' \ +| gofmt -r 'f64.DotUnitary(a, b) -> alpha + float32(f32.DdotUnitary(a, b))' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1Sds\2_" \ + -e 's_^// D\(.*\)$_// Sds\1 plus a constant_' \ + -e 's_\\sum_alpha + \\sum_' \ + -e 's/n int/n int, alpha float32/' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1single_sdsdot.go + + +# Level2 routines. 
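+# The gofmt -r rules used throughout this script are syntactic rewrites of
+# the form 'pattern -> replacement'; for example 'float64 -> float32'
+# rewrites every occurrence of the float64 type, which is what makes this
+# mechanical float32 translation reliable.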
+ +echo Generating level2single.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level2single.go +cat level2double.go \ +| gofmt -r 'blas.Float64Level2 -> blas.Float32Level2' \ +\ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'Dscal -> Sscal' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyIncTo -> f32.AxpyIncTo' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.DotInc -> f32.DotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.Ger -> f32.Ger' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level2single.go + + +# Level3 routines. + +echo Generating level3single.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level3single.go +cat level3double.go \ +| gofmt -r 'blas.Float64Level3 -> blas.Float32Level3' \ +\ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level3single.go + +echo Generating sgemm.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > sgemm.go +cat dgemm.go \ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'sliceView64 -> sliceView32' \ +| gofmt -r 'checkDMatrix -> checkSMatrix' \ +\ +| gofmt -r 'dgemmParallel -> sgemmParallel' \ +| gofmt -r 'computeNumBlocks64 -> computeNumBlocks32' \ +| gofmt -r 'dgemmSerial -> sgemmSerial' \ +| gofmt -r 'dgemmSerialNotNot -> sgemmSerialNotNot' \ +| gofmt -r 'dgemmSerialTransNot -> sgemmSerialTransNot' \ +| gofmt -r 'dgemmSerialNotTrans -> sgemmSerialNotTrans' \ +| gofmt -r 'dgemmSerialTransTrans -> sgemmSerialTransTrans' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyIncTo -> f32.AxpyIncTo' \ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_^// d_// s_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> sgemm.go diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/README.md b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/README.md new file mode 100644 index 00000000..ee867bb7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/README.md @@ -0,0 +1,4 @@ +# Gonum floats [![GoDoc](https://godoc.org/gonum.org/v1/gonum/floats?status.svg)](https://godoc.org/gonum.org/v1/gonum/floats) + +Package floats provides a set of helper routines for dealing with slices of float64. +The functions avoid allocations to allow for use within tight loops without garbage collection overhead. diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/doc.go new file mode 100644 index 00000000..55d115e0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/doc.go @@ -0,0 +1,11 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package floats provides a set of helper routines for dealing with slices +// of float64. The functions avoid allocations to allow for use within tight +// loops without garbage collection overhead. +// +// The convention used is that when a slice is being modified in place, it has +// the name dst. +package floats diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/floats.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/floats.go new file mode 100644 index 00000000..81d85e80 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/floats/floats.go @@ -0,0 +1,928 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package floats + +import ( + "errors" + "math" + "sort" + "strconv" + + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Add adds, element-wise, the elements of s and dst, and stores the result in dst. +// Panics if the lengths of dst and s do not match. +func Add(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: lengths of the slices do not match") + } + f64.AxpyUnitaryTo(dst, 1, s, dst) +} + +// AddTo adds, element-wise, the elements of s and t and +// stores the result in dst. Panics if the lengths of s, t and dst do not match. +func AddTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic("floats: lengths of adders do not match") + } + if len(dst) != len(s) { + panic("floats: length of destination does not match length of adder") + } + f64.AxpyUnitaryTo(dst, 1, s, t) + return dst +} + +// AddConst adds the scalar c to all of the values in dst. +func AddConst(c float64, dst []float64) { + for i := range dst { + dst[i] += c + } +} + +// AddScaled performs dst = dst + alpha * s. +// It panics if the lengths of dst and s are not equal. +func AddScaled(dst []float64, alpha float64, s []float64) { + if len(dst) != len(s) { + panic("floats: lengths of destination and source do not match") + } + f64.AxpyUnitaryTo(dst, alpha, s, dst) +} + +// AddScaledTo performs dst = y + alpha * s, where alpha is a scalar, +// and dst, y and s are all slices. +// It panics if the lengths of dst, y, and s are not equal. +// +// At the return of the function, dst[i] = y[i] + alpha * s[i] +func AddScaledTo(dst, y []float64, alpha float64, s []float64) []float64 { + if len(dst) != len(s) || len(dst) != len(y) { + panic("floats: lengths of slices do not match") + } + f64.AxpyUnitaryTo(dst, alpha, s, y) + return dst +} + +// argsort is a helper that implements sort.Interface, as used by +// Argsort. +type argsort struct { + s []float64 + inds []int +} + +func (a argsort) Len() int { + return len(a.s) +} + +func (a argsort) Less(i, j int) bool { + return a.s[i] < a.s[j] +} + +func (a argsort) Swap(i, j int) { + a.s[i], a.s[j] = a.s[j], a.s[i] + a.inds[i], a.inds[j] = a.inds[j], a.inds[i] +} + +// Argsort sorts the elements of dst while tracking their original order. +// At the conclusion of Argsort, dst will contain the original elements of dst +// but sorted in increasing order, and inds will contain the original position +// of the elements in the slice such that dst[i] = origDst[inds[i]]. +// It panics if the lengths of dst and inds do not match. 
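A brief usage sketch of Argsort as documented above (its implementation follows) — illustrative only, not part of the vendored file:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/floats"
)

func main() {
	s := []float64{3, 1, 2}
	inds := make([]int, len(s))
	floats.Argsort(s, inds)
	fmt.Println(s)    // [1 2 3]
	fmt.Println(inds) // [1 2 0]: s[i] == orig[inds[i]]
}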
+func Argsort(dst []float64, inds []int) { + if len(dst) != len(inds) { + panic("floats: length of inds does not match length of slice") + } + for i := range dst { + inds[i] = i + } + + a := argsort{s: dst, inds: inds} + sort.Sort(a) +} + +// Count applies the function f to every element of s and returns the number +// of times the function returned true. +func Count(f func(float64) bool, s []float64) int { + var n int + for _, val := range s { + if f(val) { + n++ + } + } + return n +} + +// CumProd finds the cumulative product of the first i elements in +// s and puts them in place into the ith element of the +// destination dst. A panic will occur if the lengths of arguments +// do not match. +// +// At the return of the function, dst[i] = s[i] * s[i-1] * s[i-2] * ... +func CumProd(dst, s []float64) []float64 { + if len(dst) != len(s) { + panic("floats: length of destination does not match length of the source") + } + if len(dst) == 0 { + return dst + } + return f64.CumProd(dst, s) +} + +// CumSum finds the cumulative sum of the first i elements in +// s and puts them in place into the ith element of the +// destination dst. A panic will occur if the lengths of arguments +// do not match. +// +// At the return of the function, dst[i] = s[i] + s[i-1] + s[i-2] + ... +func CumSum(dst, s []float64) []float64 { + if len(dst) != len(s) { + panic("floats: length of destination does not match length of the source") + } + if len(dst) == 0 { + return dst + } + return f64.CumSum(dst, s) +} + +// Distance computes the L-norm of s - t. See Norm for special cases. +// A panic will occur if the lengths of s and t do not match. +func Distance(s, t []float64, L float64) float64 { + if len(s) != len(t) { + panic("floats: slice lengths do not match") + } + if len(s) == 0 { + return 0 + } + var norm float64 + if L == 2 { + for i, v := range s { + diff := t[i] - v + norm = math.Hypot(norm, diff) + } + return norm + } + if L == 1 { + for i, v := range s { + norm += math.Abs(t[i] - v) + } + return norm + } + if math.IsInf(L, 1) { + for i, v := range s { + absDiff := math.Abs(t[i] - v) + if absDiff > norm { + norm = absDiff + } + } + return norm + } + for i, v := range s { + norm += math.Pow(math.Abs(t[i]-v), L) + } + return math.Pow(norm, 1/L) +} + +// Div performs element-wise division dst / s +// and stores the value in dst. It panics if the +// lengths of dst and s are not equal. +func Div(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: slice lengths do not match") + } + f64.Div(dst, s) +} + +// DivTo performs element-wise division s / t +// and stores the value in dst. It panics if the +// lengths of s, t, and dst are not equal. +func DivTo(dst, s, t []float64) []float64 { + if len(s) != len(t) || len(dst) != len(t) { + panic("floats: slice lengths do not match") + } + return f64.DivTo(dst, s, t) +} + +// Dot computes the dot product of s1 and s2, i.e. +// sum_{i = 1}^N s1[i]*s2[i]. +// A panic will occur if lengths of arguments do not match. +func Dot(s1, s2 []float64) float64 { + if len(s1) != len(s2) { + panic("floats: lengths of the slices do not match") + } + return f64.DotUnitary(s1, s2) +} + +// Equal returns true if the slices have equal lengths and +// all elements are numerically identical. 
+func Equal(s1, s2 []float64) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if s2[i] != val { + return false + } + } + return true +} + +// EqualApprox returns true if the slices have equal lengths and +// all element pairs are equal to within an absolute tolerance of tol +// or a relative tolerance of tol. +func EqualApprox(s1, s2 []float64, tol float64) bool { + if len(s1) != len(s2) { + return false + } + for i, a := range s1 { + if !EqualWithinAbsOrRel(a, s2[i], tol, tol) { + return false + } + } + return true +} + +// EqualFunc returns true if the slices have the same lengths +// and the function returns true for all element pairs. +func EqualFunc(s1, s2 []float64, f func(float64, float64) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if !f(val, s2[i]) { + return false + } + } + return true +} + +// EqualWithinAbs returns true if a and b have an absolute +// difference of less than tol. +func EqualWithinAbs(a, b, tol float64) bool { + return a == b || math.Abs(a-b) <= tol +} + +const minNormalFloat64 = 2.2250738585072014e-308 + +// EqualWithinRel returns true if the difference between a and b +// is not greater than tol times the greater value. +func EqualWithinRel(a, b, tol float64) bool { + if a == b { + return true + } + delta := math.Abs(a - b) + if delta <= minNormalFloat64 { + return delta <= tol*minNormalFloat64 + } + // We depend on the division in this relationship to identify + // infinities (we rely on the NaN to fail the test) otherwise + // we compare Infs of the same sign and evaluate Infs as equal + // independent of sign. + return delta/math.Max(math.Abs(a), math.Abs(b)) <= tol +} + +// EqualWithinAbsOrRel returns true if a and b are equal to within +// the absolute tolerance or the relative tolerance. +func EqualWithinAbsOrRel(a, b, absTol, relTol float64) bool { + if EqualWithinAbs(a, b, absTol) { + return true + } + return EqualWithinRel(a, b, relTol) +} + +// EqualWithinULP returns true if a and b are equal to within +// the specified number of floating point units in the last place. +func EqualWithinULP(a, b float64, ulp uint) bool { + if a == b { + return true + } + if math.IsNaN(a) || math.IsNaN(b) { + return false + } + if math.Signbit(a) != math.Signbit(b) { + return math.Float64bits(math.Abs(a))+math.Float64bits(math.Abs(b)) <= uint64(ulp) + } + return ulpDiff(math.Float64bits(a), math.Float64bits(b)) <= uint64(ulp) +} + +func ulpDiff(a, b uint64) uint64 { + if a > b { + return a - b + } + return b - a +} + +// EqualLengths returns true if all of the slices have equal length, +// and false otherwise. Returns true if there are no input slices. +func EqualLengths(slices ...[]float64) bool { + // This length check is needed: http://play.golang.org/p/sdty6YiLhM + if len(slices) == 0 { + return true + } + l := len(slices[0]) + for i := 1; i < len(slices); i++ { + if len(slices[i]) != l { + return false + } + } + return true +} + +// Find applies f to every element of s and returns the indices of the first +// k elements for which f returns true, or all such elements +// if k < 0. +// Find will reslice inds to have 0 length, and will append +// found indices to inds. +// If k > 0 and there are fewer than k elements in s satisfying f, +// all of the found elements will be returned along with an error. +// At the return of the function, the input inds will be in an undetermined state. 
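The tolerance helpers above compose as the sketch below shows (illustrative only; the implementation of Find continues after it). The absolute tolerance governs comparisons near zero, the relative tolerance governs large magnitudes:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/floats"
)

func main() {
	// Near zero the absolute tolerance decides.
	fmt.Println(floats.EqualWithinAbsOrRel(1e-10, 2e-10, 1e-9, 1e-6)) // true
	// For large values the relative tolerance decides.
	fmt.Println(floats.EqualWithinAbsOrRel(1e10, 1.0000001e10, 1e-9, 1e-6)) // true
	// EqualApprox applies the same test element-wise, with tol used for both.
	fmt.Println(floats.EqualApprox([]float64{1, 2}, []float64{1, 2 + 1e-12}, 1e-9)) // true
}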
+func Find(inds []int, f func(float64) bool, s []float64, k int) ([]int, error) { + // inds is also returned to allow for calling with nil + + // Reslice inds to have zero length + inds = inds[:0] + + // If zero elements requested, can just return + if k == 0 { + return inds, nil + } + + // If k < 0, return all of the found indices + if k < 0 { + for i, val := range s { + if f(val) { + inds = append(inds, i) + } + } + return inds, nil + } + + // Otherwise, find the first k elements + nFound := 0 + for i, val := range s { + if f(val) { + inds = append(inds, i) + nFound++ + if nFound == k { + return inds, nil + } + } + } + // Finished iterating over the loop, which means k elements were not found + return inds, errors.New("floats: insufficient elements found") +} + +// HasNaN returns true if the slice s has any values that are NaN and false +// otherwise. +func HasNaN(s []float64) bool { + for _, v := range s { + if math.IsNaN(v) { + return true + } + } + return false +} + +// LogSpan returns a set of n equally spaced points in log space between +// l and u, where n is equal to len(dst). The first element of the +// resulting dst will be l and the final element of dst will be u. +// Panics if len(dst) < 2. +// Note that this call will return NaNs if either l or u is negative, and +// will return all zeros if l or u is zero. +// Also returns the mutated slice dst, so that it can be used in range, like: +// +// for i, x := range LogSpan(dst, l, u) { ... } +func LogSpan(dst []float64, l, u float64) []float64 { + Span(dst, math.Log(l), math.Log(u)) + for i := range dst { + dst[i] = math.Exp(dst[i]) + } + return dst +} + +// LogSumExp returns the log of the sum of the exponentials of the values in s. +// Panics if s is an empty slice. +func LogSumExp(s []float64) float64 { + // Want to do this in a numerically stable way which avoids + // overflow and underflow + // First, find the maximum value in the slice. + maxval := Max(s) + if math.IsInf(maxval, 0) { + // If it's infinity either way, the logsumexp will be infinity as well + // returning now avoids NaNs + return maxval + } + var lse float64 + // Compute the sumexp part + for _, val := range s { + lse += math.Exp(val - maxval) + } + // Take the log and add back on the constant taken out + return math.Log(lse) + maxval +} + +// Max returns the maximum value in the input slice. If the slice is empty, Max will panic. +func Max(s []float64) float64 { + return s[MaxIdx(s)] +} + +// MaxIdx returns the index of the maximum value in the input slice. If several +// entries have the maximum value, the first such index is returned. If the slice +// is empty, MaxIdx will panic. +func MaxIdx(s []float64) int { + if len(s) == 0 { + panic("floats: zero slice length") + } + max := math.NaN() + var ind int + for i, v := range s { + if math.IsNaN(v) { + continue + } + if v > max || math.IsNaN(max) { + max = v + ind = i + } + } + return ind +} + +// Min returns the minimum value in the input slice. If the slice is empty, Min will panic. +func Min(s []float64) float64 { + return s[MinIdx(s)] +} + +// MinIdx returns the index of the minimum value in the input slice. If several +// entries have the minimum value, the first such index is returned. If the slice +// is empty, MinIdx will panic.
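+// A minimal usage sketch (values hypothetical). NaN entries are skipped, +// so the result indexes the smallest non-NaN value. +// +// MinIdx([]float64{math.NaN(), 3, 1, 2}) // 2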
+func MinIdx(s []float64) int { + if len(s) == 0 { + panic("floats: zero slice length") + } + min := math.NaN() + var ind int + for i, v := range s { + if math.IsNaN(v) { + continue + } + if v < min || math.IsNaN(min) { + min = v + ind = i + } + } + return ind +} + +// Mul performs element-wise multiplication between dst +// and s and stores the value in dst. Panics if the +// lengths of dst and s are not equal. +func Mul(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: slice lengths do not match") + } + for i, val := range s { + dst[i] *= val + } +} + +// MulTo performs element-wise multiplication between s +// and t and stores the value in dst. Panics if the +// lengths of s, t, and dst are not equal. +func MulTo(dst, s, t []float64) []float64 { + if len(s) != len(t) || len(dst) != len(t) { + panic("floats: slice lengths do not match") + } + for i, val := range t { + dst[i] = val * s[i] + } + return dst +} + +const ( + nanBits = 0x7ff8000000000000 + nanMask = 0xfff8000000000000 +) + +// NaNWith returns an IEEE 754 "quiet not-a-number" value with the +// payload specified in the low 51 bits of payload. +// The NaN returned by math.NaN has a bit pattern equal to NaNWith(1). +func NaNWith(payload uint64) float64 { + return math.Float64frombits(nanBits | (payload &^ nanMask)) +} + +// NaNPayload returns the lowest 51 bits payload of an IEEE 754 "quiet +// not-a-number". For values of f other than quiet-NaN, NaNPayload +// returns zero and false. +func NaNPayload(f float64) (payload uint64, ok bool) { + b := math.Float64bits(f) + if b&nanBits != nanBits { + return 0, false + } + return b &^ nanMask, true +} + +// NearestIdx returns the index of the element in s +// whose value is nearest to v. If several such +// elements exist, the lowest index is returned. +// NearestIdx panics if len(s) == 0. +func NearestIdx(s []float64, v float64) int { + if len(s) == 0 { + panic("floats: zero length slice") + } + switch { + case math.IsNaN(v): + return 0 + case math.IsInf(v, 1): + return MaxIdx(s) + case math.IsInf(v, -1): + return MinIdx(s) + } + var ind int + dist := math.NaN() + for i, val := range s { + newDist := math.Abs(v - val) + // A NaN distance will not be closer. + if math.IsNaN(newDist) { + continue + } + if newDist < dist || math.IsNaN(dist) { + dist = newDist + ind = i + } + } + return ind +} + +// NearestIdxForSpan returns the index of a hypothetical vector created +// by Span with length n and bounds l and u whose value is closest +// to v. That is, NearestIdxForSpan(n, l, u, v) is equivalent to +// NearestIdx(Span(make([]float64, n), l, u), v) without an allocation. +// NearestIdxForSpan panics if n is less than two. +func NearestIdxForSpan(n int, l, u float64, v float64) int { + if n <= 1 { + panic("floats: span must have length >1") + } + if math.IsNaN(v) { + return 0 + } + + // Special cases for Inf and NaN.
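+ // Each case below mirrors the slice Span would build: NaN bounds leave + // only one usable end point, while infinite bounds pin the end points + // (and, for opposite infinities, a finite midpoint) to fixed positions.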
+ switch { + case math.IsNaN(l) && !math.IsNaN(u): + return n - 1 + case math.IsNaN(u): + return 0 + case math.IsInf(l, 0) && math.IsInf(u, 0): + if l == u { + return 0 + } + if n%2 == 1 { + if !math.IsInf(v, 0) { + return n / 2 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n/2 + 1 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n / 2 + case math.IsInf(l, 0): + if v == l { + return 0 + } + return n - 1 + case math.IsInf(u, 0): + if v == u { + return n - 1 + } + return 0 + case math.IsInf(v, -1): + if l <= u { + return 0 + } + return n - 1 + case math.IsInf(v, 1): + if u <= l { + return 0 + } + return n - 1 + } + + // Special cases for v outside (l, u) and (u, l). + switch { + case l < u: + if v <= l { + return 0 + } + if v >= u { + return n - 1 + } + case l > u: + if v >= l { + return 0 + } + if v <= u { + return n - 1 + } + default: + return 0 + } + + // Can't guarantee anything about exactly halfway between + // because of floating point weirdness. + return int((float64(n)-1)/(u-l)*(v-l) + 0.5) +} + +// Norm returns the L norm of the slice S, defined as +// (sum_{i=1}^N s[i]^L)^{1/L} +// Special cases: +// L = math.Inf(1) gives the maximum absolute value. +// Does not correctly compute the zero norm (use Count). +func Norm(s []float64, L float64) float64 { + // Should this complain if L is not positive? + // Should this be done in log space for better numerical stability? + // would be more cost + // maybe only if L is high? + if len(s) == 0 { + return 0 + } + if L == 2 { + twoNorm := math.Abs(s[0]) + for i := 1; i < len(s); i++ { + twoNorm = math.Hypot(twoNorm, s[i]) + } + return twoNorm + } + var norm float64 + if L == 1 { + for _, val := range s { + norm += math.Abs(val) + } + return norm + } + if math.IsInf(L, 1) { + for _, val := range s { + norm = math.Max(norm, math.Abs(val)) + } + return norm + } + for _, val := range s { + norm += math.Pow(math.Abs(val), L) + } + return math.Pow(norm, 1/L) +} + +// ParseWithNA converts the string s to a float64 in v. +// If s equals missing, w is returned as 0, otherwise 1. +func ParseWithNA(s, missing string) (v, w float64, err error) { + if s == missing { + return 0, 0, nil + } + v, err = strconv.ParseFloat(s, 64) + if err == nil { + w = 1 + } + return v, w, err +} + +// Prod returns the product of the elements of the slice. +// Returns 1 if len(s) = 0. +func Prod(s []float64) float64 { + prod := 1.0 + for _, val := range s { + prod *= val + } + return prod +} + +// Reverse reverses the order of elements in the slice. +func Reverse(s []float64) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +// Round returns the half away from zero rounded value of x with prec precision. +// +// Special cases are: +// Round(±0) = +0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. + if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + if x < 0 { + x = math.Ceil(intermed - 0.5) + } else { + x = math.Floor(intermed + 0.5) + } + + if x == 0 { + return 0 + } + + return x / pow +} + +// RoundEven returns the half even rounded value of x with prec precision. 
+// +// Special cases are: +// RoundEven(±0) = +0 +// RoundEven(±Inf) = ±Inf +// RoundEven(NaN) = NaN +func RoundEven(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. + if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + if isHalfway(intermed) { + correction, _ := math.Modf(math.Mod(intermed, 2)) + intermed += correction + if intermed > 0 { + x = math.Floor(intermed) + } else { + x = math.Ceil(intermed) + } + } else { + if x < 0 { + x = math.Ceil(intermed - 0.5) + } else { + x = math.Floor(intermed + 0.5) + } + } + + if x == 0 { + return 0 + } + + return x / pow +} + +func isHalfway(x float64) bool { + _, frac := math.Modf(x) + frac = math.Abs(frac) + return frac == 0.5 || (math.Nextafter(frac, math.Inf(-1)) < 0.5 && math.Nextafter(frac, math.Inf(1)) > 0.5) +} + +// Same returns true if the input slices have the same length and all elements +// have the same value with NaN treated as the same. +func Same(s, t []float64) bool { + if len(s) != len(t) { + return false + } + for i, v := range s { + w := t[i] + if v != w && !(math.IsNaN(v) && math.IsNaN(w)) { + return false + } + } + return true +} + +// Scale multiplies every element in dst by the scalar c. +func Scale(c float64, dst []float64) { + if len(dst) > 0 { + f64.ScalUnitary(c, dst) + } +} + +// Span returns a set of N equally spaced points between l and u, where N +// is equal to the length of the destination. The first element of the destination +// is l, the final element of the destination is u. +// +// Panics if len(dst) < 2. +// +// Span also returns the mutated slice dst, so that it can be used in range expressions, +// like: +// +// for i, x := range Span(dst, l, u) { ... } +func Span(dst []float64, l, u float64) []float64 { + n := len(dst) + if n < 2 { + panic("floats: destination must have length >1") + } + + // Special cases for Inf and NaN. + switch { + case math.IsNaN(l): + for i := range dst[:len(dst)-1] { + dst[i] = math.NaN() + } + dst[len(dst)-1] = u + return dst + case math.IsNaN(u): + for i := range dst[1:] { + dst[i+1] = math.NaN() + } + dst[0] = l + return dst + case math.IsInf(l, 0) && math.IsInf(u, 0): + for i := range dst[:len(dst)/2] { + dst[i] = l + dst[len(dst)-i-1] = u + } + if len(dst)%2 == 1 { + if l != u { + dst[len(dst)/2] = 0 + } else { + dst[len(dst)/2] = l + } + } + return dst + case math.IsInf(l, 0): + for i := range dst[:len(dst)-1] { + dst[i] = l + } + dst[len(dst)-1] = u + return dst + case math.IsInf(u, 0): + for i := range dst[1:] { + dst[i+1] = u + } + dst[0] = l + return dst + } + + step := (u - l) / float64(n-1) + for i := range dst { + dst[i] = l + step*float64(i) + } + return dst +} + +// Sub subtracts, element-wise, the elements of s from dst. Panics if +// the lengths of dst and s do not match. +func Sub(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: length of the slices do not match") + } + f64.AxpyUnitaryTo(dst, -1, s, dst) +} + +// SubTo subtracts, element-wise, the elements of t from s and +// stores the result in dst. Panics if the lengths of s, t and dst do not match.
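+// A minimal usage sketch (values hypothetical): +// +// dst := make([]float64, 3) +// SubTo(dst, []float64{3, 4, 5}, []float64{1, 1, 1}) +// // dst == []float64{2, 3, 4}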
+func SubTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic("floats: length of subtractor and subtractee do not match") + } + if len(dst) != len(s) { + panic("floats: length of destination does not match length of subtractor") + } + f64.AxpyUnitaryTo(dst, -1, t, s) + return dst +} + +// Sum returns the sum of the elements of the slice. +func Sum(s []float64) float64 { + var sum float64 + for _, val := range s { + sum += val + } + return sum +} + +// Within returns the first index i where s[i] <= v < s[i+1]. Within panics if: +// - len(s) < 2 +// - s is not sorted +func Within(s []float64, v float64) int { + if len(s) < 2 { + panic("floats: slice length less than 2") + } + if !sort.Float64sAreSorted(s) { + panic("floats: input slice not sorted") + } + if v < s[0] || v >= s[len(s)-1] || math.IsNaN(v) { + return -1 + } + for i, f := range s[1:] { + if v < f { + return i + } + } + return -1 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/.gitignore b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/.gitignore new file mode 100644 index 00000000..86e0d240 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/.gitignore @@ -0,0 +1 @@ +test.out \ No newline at end of file diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/README.md b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/README.md new file mode 100644 index 00000000..f0a9505e --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/README.md @@ -0,0 +1,3 @@ +# Gonum graph [![GoDoc](https://godoc.org/gonum.org/v1/gonum/graph?status.svg)](https://godoc.org/gonum.org/v1/gonum/graph) + +This is a generalized graph package for the Go language. diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/doc.go new file mode 100644 index 00000000..86545039 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package graph defines graph interfaces. +package graph diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/graph.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/graph.go new file mode 100644 index 00000000..3dc3a875 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/graph.go @@ -0,0 +1,253 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Node is a graph node. It returns a graph-unique integer ID. +type Node interface { + ID() int64 +} + +// Edge is a graph edge. In directed graphs, the direction of the +// edge is given from -> to, otherwise the edge is semantically +// unordered. +type Edge interface { + From() Node + To() Node +} + +// WeightedEdge is a weighted graph edge. In directed graphs, the direction +// of the edge is given from -> to, otherwise the edge is semantically +// unordered. +type WeightedEdge interface { + Edge + Weight() float64 +} + +// Graph is a generalized graph. +type Graph interface { + // Has returns whether a node with the given ID exists + // within the graph. + Has(id int64) bool + + // Nodes returns all the nodes in the graph. 
+ Nodes() []Node + + // From returns all nodes that can be reached directly + // from the node with the given ID. + From(id int64) []Node + + // HasEdgeBetween returns whether an edge exists between + // nodes with IDs xid and yid without considering direction. + HasEdgeBetween(xid, yid int64) bool + + // Edge returns the edge from u to v, with IDs uid and vid, + // if such an edge exists and nil otherwise. The node v + // must be directly reachable from u as defined by the + // From method. + Edge(uid, vid int64) Edge +} + +// Weighted is a weighted graph. +type Weighted interface { + Graph + + // WeightedEdge returns the weighted edge from u to v + // with IDs uid and vid if such an edge exists and + // nil otherwise. The node v must be directly + // reachable from u as defined by the From method. + WeightedEdge(uid, vid int64) WeightedEdge + + // Weight returns the weight for the edge between + // x and y with IDs xid and yid if Edge(xid, yid) + // returns a non-nil Edge. + // If x and y are the same node or there is no + // joining edge between the two nodes the weight + // value returned is implementation dependent. + // Weight returns true if an edge exists between + // x and y or if x and y have the same ID, false + // otherwise. + Weight(xid, yid int64) (w float64, ok bool) +} + +// Undirected is an undirected graph. +type Undirected interface { + Graph + + // EdgeBetween returns the edge between nodes x and y + // with IDs xid and yid. + EdgeBetween(xid, yid int64) Edge +} + +// WeightedUndirected is a weighted undirected graph. +type WeightedUndirected interface { + Weighted + + // WeightedEdgeBetween returns the edge between nodes + // x and y with IDs xid and yid. + WeightedEdgeBetween(xid, yid int64) WeightedEdge +} + +// Directed is a directed graph. +type Directed interface { + Graph + + // HasEdgeFromTo returns whether an edge exists + // in the graph from u to v with IDs uid and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + To(id int64) []Node +} + +// WeightedDirected is a weighted directed graph. +type WeightedDirected interface { + Weighted + + // HasEdgeFromTo returns whether an edge exists + // in the graph from u to v with the IDs uid and + // vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + To(id int64) []Node +} + +// NodeAdder is an interface for adding arbitrary nodes to a graph. +type NodeAdder interface { + // NewNode returns a new Node with a unique + // arbitrary ID. + NewNode() Node + + // AddNode adds a node to the graph. AddNode panics if + // the added node ID matches an existing node ID. + AddNode(Node) +} + +// NodeRemover is an interface for removing nodes from a graph. +type NodeRemover interface { + // RemoveNode removes the node with the given ID + // from the graph, as well as any edges attached + // to it. If the node is not in the graph it is + // a no-op. + RemoveNode(id int64) +} + +// EdgeAdder is an interface for adding edges to a graph. +type EdgeAdder interface { + // NewEdge returns a new Edge from the source to the destination node. + NewEdge(from, to Node) Edge + + // SetEdge adds an edge from one node to another. + // If the graph supports node addition the nodes + // will be added if they do not exist, otherwise + // SetEdge will panic. + // The behavior of an EdgeAdder when the IDs + // returned by e.From and e.To are equal is + // implementation-dependent.
+ SetEdge(e Edge) +} + +// WeightedEdgeAdder is an interface for adding edges to a graph. +type WeightedEdgeAdder interface { + // NewWeightedEdge returns a new WeightedEdge from + // the source to the destination node. + NewWeightedEdge(from, to Node, weight float64) WeightedEdge + + // SetWeightedEdge adds an edge from one node to + // another. If the graph supports node addition + // the nodes will be added if they do not exist, + // otherwise SetWeightedEdge will panic. + // The behavior of a WeightedEdgeAdder when the IDs + // returned by e.From and e.To are equal is + // implementation-dependent. + SetWeightedEdge(e WeightedEdge) +} + +// EdgeRemover is an interface for removing edges from a graph. +type EdgeRemover interface { + // RemoveEdge removes the edge with the given end + // IDs, leaving the terminal nodes. If the edge + // does not exist it is a no-op. + RemoveEdge(fid, tid int64) +} + +// Builder is a graph that can have nodes and edges added. +type Builder interface { + NodeAdder + EdgeAdder +} + +// WeightedBuilder is a graph that can have nodes and weighted edges added. +type WeightedBuilder interface { + NodeAdder + WeightedEdgeAdder +} + +// UndirectedBuilder is an undirected graph builder. +type UndirectedBuilder interface { + Undirected + Builder +} + +// UndirectedWeightedBuilder is an undirected weighted graph builder. +type UndirectedWeightedBuilder interface { + Undirected + WeightedBuilder +} + +// DirectedBuilder is a directed graph builder. +type DirectedBuilder interface { + Directed + Builder +} + +// DirectedWeightedBuilder is a directed weighted graph builder. +type DirectedWeightedBuilder interface { + Directed + WeightedBuilder +} + +// Copy copies nodes and edges as undirected edges from the source to the destination +// without first clearing the destination. Copy will panic if a node ID in the source +// graph matches a node ID in the destination. +// +// If the source is undirected and the destination is directed both directions will +// be present in the destination after the copy is complete. +func Copy(dst Builder, src Graph) { + nodes := src.Nodes() + for _, n := range nodes { + dst.AddNode(n) + } + for _, u := range nodes { + for _, v := range src.From(u.ID()) { + dst.SetEdge(dst.NewEdge(u, v)) + } + } +} + +// CopyWeighted copies nodes and edges as undirected edges from the source to the destination +// without first clearing the destination. CopyWeighted will panic if a node ID in the source +// graph matches a node ID in the destination. +// +// If the source is undirected and the destination is directed both directions will +// be present in the destination after the copy is complete. +// +// If the source is a directed graph, the destination is undirected, and a fundamental +// cycle exists with two nodes where the edge weights differ, the resulting destination +// graph's edge weight between those nodes is undefined. If there is a defined function +// to resolve such conflicts, an UndirectWeighted may be used to do this.
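+// A minimal usage sketch (identifiers hypothetical): for any dst satisfying +// WeightedBuilder and src satisfying Weighted, +// +// CopyWeighted(dst, src) +// +// adds every node of src to dst and sets a weighted edge in dst for each +// edge reachable via src.From.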
+func CopyWeighted(dst WeightedBuilder, src Weighted) { + nodes := src.Nodes() + for _, n := range nodes { + dst.AddNode(n) + } + for _, u := range nodes { + for _, v := range src.From(u.ID()) { + dst.SetWeightedEdge(dst.NewWeightedEdge(u, v, src.WeightedEdge(u.ID(), v.ID()).Weight())) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go new file mode 100644 index 00000000..6535dcff --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linear provides common linear data structures. +package linear diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go new file mode 100644 index 00000000..62e19db6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go @@ -0,0 +1,73 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package linear + +import ( + "gonum.org/v1/gonum/graph" +) + +// NodeStack implements a LIFO stack of graph.Node. +type NodeStack []graph.Node + +// Len returns the number of graph.Nodes on the stack. +func (s *NodeStack) Len() int { return len(*s) } + +// Pop returns the last graph.Node on the stack and removes it +// from the stack. +func (s *NodeStack) Pop() graph.Node { + v := *s + v, n := v[:len(v)-1], v[len(v)-1] + *s = v + return n +} + +// Push adds the node n to the stack at the last position. +func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } + +// NodeQueue implements a FIFO queue. +type NodeQueue struct { + head int + data []graph.Node +} + +// Len returns the number of graph.Nodes in the queue. +func (q *NodeQueue) Len() int { return len(q.data) - q.head } + +// Enqueue adds the node n to the back of the queue. +func (q *NodeQueue) Enqueue(n graph.Node) { + if len(q.data) == cap(q.data) && q.head > 0 { + l := q.Len() + copy(q.data, q.data[q.head:]) + q.head = 0 + q.data = append(q.data[:l], n) + } else { + q.data = append(q.data, n) + } +} + +// Dequeue returns the graph.Node at the front of the queue and +// removes it from the queue. +func (q *NodeQueue) Dequeue() graph.Node { + if q.Len() == 0 { + panic("queue: empty queue") + } + + var n graph.Node + n, q.data[q.head] = q.data[q.head], nil + q.head++ + + if q.Len() == 0 { + q.head = 0 + q.data = q.data[:0] + } + + return n +} + +// Reset clears the queue for reuse. +func (q *NodeQueue) Reset() { + q.head = 0 + q.data = q.data[:0] +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go new file mode 100644 index 00000000..8e85e756 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ordered provides common sort ordering types. 
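+// +// A minimal usage sketch (identifiers hypothetical): +// +// sort.Sort(ordered.ByID(nodes)) // order a []graph.Node by ascending ID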
+package ordered diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go new file mode 100644 index 00000000..ea799931 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go @@ -0,0 +1,76 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ordered + +import "gonum.org/v1/gonum/graph" + +// ByID implements the sort.Interface sorting a slice of graph.Node +// by ID. +type ByID []graph.Node + +func (n ByID) Len() int { return len(n) } +func (n ByID) Less(i, j int) bool { return n[i].ID() < n[j].ID() } +func (n ByID) Swap(i, j int) { n[i], n[j] = n[j], n[i] } + +// BySliceValues implements the sort.Interface sorting a slice of +// []int64 lexically by the values of the []int64. +type BySliceValues [][]int64 + +func (c BySliceValues) Len() int { return len(c) } +func (c BySliceValues) Less(i, j int) bool { + a, b := c[i], c[j] + l := len(a) + if len(b) < l { + l = len(b) + } + for k, v := range a[:l] { + if v < b[k] { + return true + } + if v > b[k] { + return false + } + } + return len(a) < len(b) +} +func (c BySliceValues) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// BySliceIDs implements the sort.Interface sorting a slice of +// []graph.Node lexically by the IDs of the []graph.Node. +type BySliceIDs [][]graph.Node + +func (c BySliceIDs) Len() int { return len(c) } +func (c BySliceIDs) Less(i, j int) bool { + a, b := c[i], c[j] + l := len(a) + if len(b) < l { + l = len(b) + } + for k, v := range a[:l] { + if v.ID() < b[k].ID() { + return true + } + if v.ID() > b[k].ID() { + return false + } + } + return len(a) < len(b) +} +func (c BySliceIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// Int64s implements the sort.Interface sorting a slice of +// int64. +type Int64s []int64 + +func (s Int64s) Len() int { return len(s) } +func (s Int64s) Less(i, j int) bool { return s[i] < s[j] } +func (s Int64s) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Reverse reverses the order of nodes. +func Reverse(nodes []graph.Node) { + for i, j := 0, len(nodes)-1; i < j; i, j = i+1, j-1 { + nodes[i], nodes[j] = nodes[j], nodes[i] + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go new file mode 100644 index 00000000..3e3b53c0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package set provides integer and graph.Node sets. +package set diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/same.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/same.go new file mode 100644 index 00000000..f95a4e12 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/same.go @@ -0,0 +1,36 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine,!safe + +package set + +import "unsafe" + +// same determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func same(a, b Nodes) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} + +// intsSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func intsSame(a, b Ints) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} + +// int64sSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func int64sSame(a, b Int64s) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go new file mode 100644 index 00000000..4ff4f4ed --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go @@ -0,0 +1,36 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine safe + +package set + +import "reflect" + +// same determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func same(a, b Nodes) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} + +// intsSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func intsSame(a, b Ints) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} + +// int64sSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. 
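+// For example (values hypothetical), two variables sharing one map are the +// same store, while distinct maps with equal contents are not: +// +// a := make(Int64s) +// b := a +// int64sSame(a, b) // true +// int64sSame(a, make(Int64s)) // false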
+func int64sSame(a, b Int64s) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/set.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/set.go new file mode 100644 index 00000000..aacff164 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/set/set.go @@ -0,0 +1,256 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package set + +import "gonum.org/v1/gonum/graph" + +// Ints is a set of int identifiers. +type Ints map[int]struct{} + +// The simple accessor methods for Ints are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Ints) Add(e int) { + s[e] = struct{}{} +} + +// Has reports the existence of the element in the set. +func (s Ints) Has(e int) bool { + _, ok := s[e] + return ok +} + +// Remove deletes the specified element from the set. +func (s Ints) Remove(e int) { + delete(s, e) +} + +// Count reports the number of elements stored in the set. +func (s Ints) Count() int { + return len(s) +} + +// IntsEqual reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func IntsEqual(a, b Ints) bool { + if intsSame(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// Int64s is a set of int64 identifiers. +type Int64s map[int64]struct{} + +// The simple accessor methods for Int64s are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Int64s) Add(e int64) { + s[e] = struct{}{} +} + +// Has reports the existence of the element in the set. +func (s Int64s) Has(e int64) bool { + _, ok := s[e] + return ok +} + +// Remove deletes the specified element from the set. +func (s Int64s) Remove(e int64) { + delete(s, e) +} + +// Count reports the number of elements stored in the set. +func (s Int64s) Count() int { + return len(s) +} + +// Int64sEqual reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func Int64sEqual(a, b Int64s) bool { + if int64sSame(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// Nodes is a set of nodes keyed in their integer identifiers. +type Nodes map[int64]graph.Node + +// The simple accessor methods for Nodes are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Nodes) Add(n graph.Node) { + s[n.ID()] = n +} + +// Remove deletes the specified element from the set. +func (s Nodes) Remove(e graph.Node) { + delete(s, e.ID()) +} + +// Has reports the existence of the element in the set. +func (s Nodes) Has(n graph.Node) bool { + _, ok := s[n.ID()] + return ok +} + +// clear clears the set, possibly using the same backing store. +func (s *Nodes) clear() { + if len(*s) != 0 { + *s = make(Nodes) + } +} + +// Copy performs a perfect copy from src to dst (meaning the sets will +// be equal).
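+// A minimal usage sketch (identifiers hypothetical): +// +// dst := make(Nodes).Copy(src) +// // Equal(dst, src) == true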
+func (dst Nodes) Copy(src Nodes) Nodes { + if same(src, dst) { + return dst + } + + if len(dst) > 0 { + dst = make(Nodes, len(src)) + } + + for e, n := range src { + dst[e] = n + } + + return dst +} + +// Equal reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func Equal(a, b Nodes) bool { + if same(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// Union takes the union of a and b, and stores it in dst. +// +// The union of two sets, a and b, is the set containing all the +// elements of each, for instance: +// +// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} +// +// Since sets may not have repetition, unions of two sets that overlap +// do not contain repeat elements, that is: +// +// {a,b,c} UNION {b,c,d} = {a,b,c,d} +// +func (dst Nodes) Union(a, b Nodes) Nodes { + if same(a, b) { + return dst.Copy(a) + } + + if !same(a, dst) && !same(b, dst) { + dst.clear() + } + + if !same(dst, a) { + for e, n := range a { + dst[e] = n + } + } + + if !same(dst, b) { + for e, n := range b { + dst[e] = n + } + } + + return dst +} + +// Intersect takes the intersection of a and b, and stores it in dst. +// +// The intersection of two sets, a and b, is the set containing all +// the elements shared between the two sets, for instance: +// +// {a,b,c} INTERSECT {b,c,d} = {b,c} +// +// The intersection between a set and itself is itself, and thus +// effectively a copy operation: +// +// {a,b,c} INTERSECT {a,b,c} = {a,b,c} +// +// The intersection between two sets that share no elements is the empty +// set: +// +// {a,b,c} INTERSECT {d,e,f} = {} +// +func (dst Nodes) Intersect(a, b Nodes) Nodes { + var swap Nodes + + if same(a, b) { + return dst.Copy(a) + } + if same(a, dst) { + swap = b + } else if same(b, dst) { + swap = a + } else { + dst.clear() + + if len(a) > len(b) { + a, b = b, a + } + + for e, n := range a { + if _, ok := b[e]; ok { + dst[e] = n + } + } + + return dst + } + + for e := range dst { + if _, ok := swap[e]; !ok { + delete(dst, e) + } + } + + return dst +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go new file mode 100644 index 00000000..5f503c13 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go @@ -0,0 +1,54 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uid implements unique ID provision for graphs. +package uid + +import "gonum.org/v1/gonum/graph/internal/set" + +// Max is the maximum value of int64. +const Max = int64(^uint64(0) >> 1) + +// Set implements available ID storage. +type Set struct { + maxID int64 + used, free set.Int64s +} + +// NewSet returns a new Set. The returned value should not be passed except by pointer. +func NewSet() Set { + return Set{maxID: -1, used: make(set.Int64s), free: make(set.Int64s)} +} + +// NewID returns a new unique ID. The ID returned is not considered used +// until passed in a call to Use.
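+// A minimal usage sketch (values hypothetical): +// +// ids := NewSet() +// id := ids.NewID() // 0 for a fresh Set +// ids.Use(id) // id is now reserved until Release(id)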
+func (s *Set) NewID() int64 { + for id := range s.free { + return id + } + if s.maxID != Max { + return s.maxID + 1 + } + for id := int64(0); id <= s.maxID+1; id++ { + if !s.used.Has(id) { + return id + } + } + panic("unreachable") +} + +// Use adds the id to the used IDs in the Set. +func (s *Set) Use(id int64) { + s.used.Add(id) + s.free.Remove(id) + if id > s.maxID { + s.maxID = id + } +} + +// Release frees the id for reuse. +func (s *Set) Release(id int64) { + s.free.Add(id) + s.used.Remove(id) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/multigraph.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/multigraph.go new file mode 100644 index 00000000..d558fbc7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/multigraph.go @@ -0,0 +1,168 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Line is an edge in a multigraph. A Line returns an ID that must +// distinguish Lines sharing Node end points. +type Line interface { + Edge + ID() int64 +} + +// WeightedLine is a weighted multigraph edge. +type WeightedLine interface { + Line + Weight() float64 +} + +// Multigraph is a generalized multigraph. +type Multigraph interface { + // Has returns whether the node with the given ID exists + // within the multigraph. + Has(id int64) bool + + // Nodes returns all the nodes in the multigraph. + Nodes() []Node + + // From returns all nodes that can be reached directly + // from the node with the given ID. + From(id int64) []Node + + // HasEdgeBetween returns whether an edge exists between + // nodes with IDs xid and yid without considering direction. + HasEdgeBetween(xid, yid int64) bool + + // Lines returns the lines from u to v, with IDs uid and + // vid, if any such lines exist and nil otherwise. The + // node v must be directly reachable from u as defined by + // the From method. + Lines(uid, vid int64) []Line +} + +// WeightedMultigraph is a weighted multigraph. +type WeightedMultigraph interface { + Multigraph + + // WeightedLines returns the weighted lines from u to v + // with IDs uid and vid if any such lines exist and nil + // otherwise. The node v must be directly reachable + // from u as defined by the From method. + WeightedLines(uid, vid int64) []WeightedLine +} + +// UndirectedMultigraph is an undirected multigraph. +type UndirectedMultigraph interface { + Multigraph + + // LinesBetween returns the lines between nodes x and y + // with IDs xid and yid. + LinesBetween(xid, yid int64) []Line +} + +// WeightedUndirectedMultigraph is a weighted undirected multigraph. +type WeightedUndirectedMultigraph interface { + WeightedMultigraph + + // WeightedLinesBetween returns the lines between nodes + // x and y with IDs xid and yid. + WeightedLinesBetween(xid, yid int64) []WeightedLine +} + +// DirectedMultigraph is a directed multigraph. +type DirectedMultigraph interface { + Multigraph + + // HasEdgeFromTo returns whether an edge exists + // in the multigraph from u to v with IDs uid + // and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + To(id int64) []Node +} + +// WeightedDirectedMultigraph is a weighted directed multigraph. 
+type WeightedDirectedMultigraph interface { + WeightedMultigraph + + // HasEdgeFromTo returns whether an edge exists + // in the multigraph from u to v with IDs uid + // and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + To(id int64) []Node +} + +// LineAdder is an interface for adding lines to a multigraph. +type LineAdder interface { + // NewLine returns a new Line from the source to the destination node. + NewLine(from, to Node) Line + + // SetLine adds a Line from one node to another. + // If the multigraph supports node addition the nodes + // will be added if they do not exist, otherwise + // SetLine will panic. + SetLine(l Line) +} + +// WeightedLineAdder is an interface for adding lines to a multigraph. +type WeightedLineAdder interface { + // NewWeightedLine returns a new WeightedLine from + // the source to the destination node. + NewWeightedLine(from, to Node, weight float64) WeightedLine + + // SetWeightedLine adds a weighted line from one node + // to another. If the multigraph supports node addition + // the nodes will be added if they do not exist, + // otherwise SetWeightedLine will panic. + SetWeightedLine(e WeightedLine) +} + +// LineRemover is an interface for removing lines from a multigraph. +type LineRemover interface { + // RemoveLine removes the line with the given end + // and line IDs, leaving the terminal nodes. If + // the line does not exist it is a no-op. + RemoveLine(fid, tid, id int64) +} + +// MultigraphBuilder is a multigraph that can have nodes and lines added. +type MultigraphBuilder interface { + NodeAdder + LineAdder +} + +// WeightedMultigraphBuilder is a multigraph that can have nodes and weighted lines added. +type WeightedMultigraphBuilder interface { + NodeAdder + WeightedLineAdder +} + +// UndirectedMultigraphBuilder is an undirected multigraph builder. +type UndirectedMultigraphBuilder interface { + UndirectedMultigraph + MultigraphBuilder +} + +// UndirectedWeightedMultigraphBuilder is an undirected weighted multigraph builder. +type UndirectedWeightedMultigraphBuilder interface { + UndirectedMultigraph + WeightedMultigraphBuilder +} + +// DirectedMultigraphBuilder is a directed multigraph builder. +type DirectedMultigraphBuilder interface { + DirectedMultigraph + MultigraphBuilder +} + +// DirectedWeightedMultigraphBuilder is a directed weighted multigraph builder. +type DirectedWeightedMultigraphBuilder interface { + DirectedMultigraph + WeightedMultigraphBuilder +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go new file mode 100644 index 00000000..717574db --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go @@ -0,0 +1,289 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/mat" +) + +// DirectedMatrix represents a directed graph using an adjacency +// matrix such that all IDs are in a contiguous block from 0 to n-1. +// Edges are stored implicitly as an edge weight, so edges stored in +// the graph are not recoverable.
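+// A minimal usage sketch (values hypothetical): three nodes, self loops +// costing 0, and +Inf marking absent edges. +// +// g := NewDirectedMatrix(3, math.Inf(1), 0, math.Inf(1)) +// g.SetWeightedEdge(WeightedEdge{F: Node(0), T: Node(1), W: 2}) +// g.HasEdgeFromTo(0, 1) // true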
+type DirectedMatrix struct { + mat *mat.Dense + nodes []graph.Node + + self float64 + absent float64 +} + +// NewDirectedMatrix creates a directed dense graph with n nodes. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewDirectedMatrix(n int, init, self, absent float64) *DirectedMatrix { + matrix := make([]float64, n*n) + if init != 0 { + for i := range matrix { + matrix[i] = init + } + } + for i := 0; i < len(matrix); i += n + 1 { + matrix[i] = self + } + return &DirectedMatrix{ + mat: mat.NewDense(n, n, matrix), + self: self, + absent: absent, + } +} + +// NewDirectedMatrixFrom creates a directed dense graph with the given nodes. +// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may +// be in any order. If IDs are not contiguous NewDirectedMatrixFrom will panic. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewDirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *DirectedMatrix { + sort.Sort(ordered.ByID(nodes)) + for i, n := range nodes { + if int64(i) != n.ID() { + panic("simple: non-contiguous node IDs") + } + } + g := NewDirectedMatrix(len(nodes), init, self, absent) + g.nodes = nodes + return g +} + +// Node returns the node in the graph with the given ID. +func (g *DirectedMatrix) Node(id int64) graph.Node { + if !g.has(id) { + return nil + } + if g.nodes == nil { + return Node(id) + } + return g.nodes[id] +} + +// Has returns whether the node exists within the graph. +func (g *DirectedMatrix) Has(id int64) bool { + return g.has(id) +} + +func (g *DirectedMatrix) has(id int64) bool { + r, _ := g.mat.Dims() + return 0 <= id && id < int64(r) +} + +// Nodes returns all the nodes in the graph. +func (g *DirectedMatrix) Nodes() []graph.Node { + if g.nodes != nil { + nodes := make([]graph.Node, len(g.nodes)) + copy(nodes, g.nodes) + return nodes + } + r, _ := g.mat.Dims() + nodes := make([]graph.Node, r) + for i := 0; i < r; i++ { + nodes[i] = Node(i) + } + return nodes +} + +// Edges returns all the edges in the graph. +func (g *DirectedMatrix) Edges() []graph.Edge { + var edges []graph.Edge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := 0; j < r; j++ { + if i == j { + continue + } + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + return edges +} + +// From returns all nodes in g that can be reached directly from n. +func (g *DirectedMatrix) From(id int64) []graph.Node { + if !g.has(id) { + return nil + } + var neighbors []graph.Node + _, c := g.mat.Dims() + for j := 0; j < c; j++ { + if int64(j) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), j), g.absent) { + neighbors = append(neighbors, g.Node(int64(j))) + } + } + return neighbors +} + +// To returns all nodes in g that can reach directly to n. +func (g *DirectedMatrix) To(id int64) []graph.Node { + if !g.has(id) { + return nil + } + var neighbors []graph.Node + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. 
+ if !isSame(g.mat.At(i, int(id)), g.absent) { + neighbors = append(neighbors, g.Node(int64(i))) + } + } + return neighbors +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *DirectedMatrix) HasEdgeBetween(xid, yid int64) bool { + if !g.has(xid) { + return false + } + if !g.has(yid) { + return false + } + // xid and yid are not greater than maximum int by this point. + return xid != yid && (!isSame(g.mat.At(int(xid), int(yid)), g.absent) || !isSame(g.mat.At(int(yid), int(xid)), g.absent)) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedMatrix) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdge(uid, vid) +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { + if g.HasEdgeFromTo(uid, vid) { + // uid and vid are not greater than maximum int by this point. + return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} + } + return nil +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *DirectedMatrix) HasEdgeFromTo(uid, vid int64) bool { + if !g.has(uid) { + return false + } + if !g.has(vid) { + return false + } + // uid and vid are not greater than maximum int by this point. + return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *DirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if g.has(xid) && g.has(yid) { + // xid and yid are not greater than maximum int by this point. + return g.mat.At(int(xid), int(yid)), true + } + return g.absent, false +} + +// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge +// are not in g or the edge is a self loop, SetEdge panics. +func (g *DirectedMatrix) SetEdge(e graph.Edge) { + g.setWeightedEdge(e, 1) +} + +// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g +// or the edge is a self loop, SetWeightedEdge panics. +func (g *DirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { + g.setWeightedEdge(e, e.Weight()) +} + +func (g *DirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { + fid := e.From().ID() + tid := e.To().ID() + if fid == tid { + panic("simple: set illegal edge") + } + if int64(int(fid)) != fid { + panic("simple: unavailable from node ID for dense graph") + } + if int64(int(tid)) != tid { + panic("simple: unavailable to node ID for dense graph") + } + // fid and tid are not greater than maximum int by this point. + g.mat.Set(int(fid), int(tid), weight) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op.
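+// Removal writes the graph's absent weight back into the matrix entry, so +// the operation is O(1) and no node state changes.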
+func (g *DirectedMatrix) RemoveEdge(fid, tid int64) { + if !g.has(fid) { + return + } + if !g.has(tid) { + return + } + // fid and tid are not greater than maximum int by this point. + g.mat.Set(int(fid), int(tid), g.absent) +} + +// Degree returns the in+out degree of n in g. +func (g *DirectedMatrix) Degree(id int64) int { + if !g.has(id) { + return 0 + } + var deg int + r, c := g.mat.Dims() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), i), g.absent) { + deg++ + } + } + for i := 0; i < c; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(i, int(id)), g.absent) { + deg++ + } + } + return deg +} + +// Matrix returns the mat.Matrix representation of the graph. The orientation +// of the matrix is such that the matrix entry at G_{ij} is the weight of the edge +// from node i to node j. +func (g *DirectedMatrix) Matrix() mat.Matrix { + // Prevent alteration of dimensions of the returned matrix. + m := *g.mat + return &m +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go new file mode 100644 index 00000000..5ae92626 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go @@ -0,0 +1,253 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/mat" +) + +// UndirectedMatrix represents an undirected graph using an adjacency +// matrix such that all IDs are in a contiguous block from 0 to n-1. +// Edges are stored implicitly as an edge weight, so edges stored in +// the graph are not recoverable. +type UndirectedMatrix struct { + mat *mat.SymDense + nodes []graph.Node + + self float64 + absent float64 +} + +// NewUndirectedMatrix creates an undirected dense graph with n nodes. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix { + matrix := make([]float64, n*n) + if init != 0 { + for i := range matrix { + matrix[i] = init + } + } + for i := 0; i < len(matrix); i += n + 1 { + matrix[i] = self + } + return &UndirectedMatrix{ + mat: mat.NewSymDense(n, matrix), + self: self, + absent: absent, + } +} + +// NewUndirectedMatrixFrom creates an undirected dense graph with the given nodes. +// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may +// be in any order. If IDs are not contiguous NewUndirectedMatrixFrom will panic. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. 
+func NewUndirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *UndirectedMatrix { + sort.Sort(ordered.ByID(nodes)) + for i, n := range nodes { + if int64(i) != n.ID() { + panic("simple: non-contiguous node IDs") + } + } + g := NewUndirectedMatrix(len(nodes), init, self, absent) + g.nodes = nodes + return g +} + +// Node returns the node in the graph with the given ID. +func (g *UndirectedMatrix) Node(id int64) graph.Node { + if !g.has(id) { + return nil + } + if g.nodes == nil { + return Node(id) + } + return g.nodes[id] +} + +// Has returns whether the node exists within the graph. +func (g *UndirectedMatrix) Has(id int64) bool { + return g.has(id) +} + +func (g *UndirectedMatrix) has(id int64) bool { + r := g.mat.Symmetric() + return 0 <= id && id < int64(r) +} + +// Nodes returns all the nodes in the graph. +func (g *UndirectedMatrix) Nodes() []graph.Node { + if g.nodes != nil { + nodes := make([]graph.Node, len(g.nodes)) + copy(nodes, g.nodes) + return nodes + } + r := g.mat.Symmetric() + nodes := make([]graph.Node, r) + for i := 0; i < r; i++ { + nodes[i] = Node(i) + } + return nodes +} + +// Edges returns all the edges in the graph. +func (g *UndirectedMatrix) Edges() []graph.Edge { + var edges []graph.Edge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := i + 1; j < r; j++ { + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + return edges +} + +// From returns all nodes in g that can be reached directly from n. +func (g *UndirectedMatrix) From(id int64) []graph.Node { + if !g.has(id) { + return nil + } + var neighbors []graph.Node + r := g.mat.Symmetric() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), i), g.absent) { + neighbors = append(neighbors, g.Node(int64(i))) + } + } + return neighbors +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *UndirectedMatrix) HasEdgeBetween(uid, vid int64) bool { + if !g.has(uid) { + return false + } + if !g.has(vid) { + return false + } + // uid and vid are not greater than maximum int by this point. + return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedMatrix) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. +func (g *UndirectedMatrix) EdgeBetween(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. +func (g *UndirectedMatrix) WeightedEdgeBetween(uid, vid int64) graph.WeightedEdge { + if g.HasEdgeBetween(uid, vid) { + // uid and vid are not greater than maximum int by this point. + return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} + } + return nil +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. 
+// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *UndirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if g.has(xid) && g.has(yid) { + // xid and yid are not greater than maximum int by this point. + return g.mat.At(int(xid), int(yid)), true + } + return g.absent, false +} + +// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge are +// not in g or the edge is a self loop, SetEdge panics. +func (g *UndirectedMatrix) SetEdge(e graph.Edge) { + g.setWeightedEdge(e, 1) +} + +// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g +// or the edge is a self loop, SetWeightedEdge panics. +func (g *UndirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { + g.setWeightedEdge(e, e.Weight()) +} + +func (g *UndirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { + fid := e.From().ID() + tid := e.To().ID() + if fid == tid { + panic("simple: set illegal edge") + } + if int64(int(fid)) != fid { + panic("simple: unavailable from node ID for dense graph") + } + if int64(int(tid)) != tid { + panic("simple: unavailable to node ID for dense graph") + } + // fid and tid are not greater than maximum int by this point. + g.mat.SetSym(int(fid), int(tid), weight) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *UndirectedMatrix) RemoveEdge(fid, tid int64) { + if !g.has(fid) { + return + } + if !g.has(tid) { + return + } + // fid and tid are not greater than maximum int by this point. + g.mat.SetSym(int(fid), int(tid), g.absent) +} + +// Degree returns the degree of n in g. +func (g *UndirectedMatrix) Degree(id int64) int { + if !g.has(id) { + return 0 + } + var deg int + r := g.mat.Symmetric() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), i), g.absent) { + deg++ + } + } + return deg +} + +// Matrix returns the mat.Matrix representation of the graph. +func (g *UndirectedMatrix) Matrix() mat.Matrix { + // Prevent alteration of dimensions of the returned matrix. + m := *g.mat + return &m +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/directed.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/directed.go new file mode 100644 index 00000000..6be172f1 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/directed.go @@ -0,0 +1,222 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" +) + +// DirectedGraph implements a generalized directed graph. +type DirectedGraph struct { + nodes map[int64]graph.Node + from map[int64]map[int64]graph.Edge + to map[int64]map[int64]graph.Edge + + nodeIDs uid.Set +} + +// NewDirectedGraph returns a DirectedGraph. 
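+//
+// A minimal construction sketch (editor's addition, not upstream source):
+//
+//  g := NewDirectedGraph()
+//  u := g.NewNode()
+//  g.AddNode(u) // u's ID becomes valid in g only once it is added.
+//  v := g.NewNode()
+//  g.AddNode(v)
+//  g.SetEdge(g.NewEdge(u, v))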
+func NewDirectedGraph() *DirectedGraph { + return &DirectedGraph{ + nodes: make(map[int64]graph.Node), + from: make(map[int64]map[int64]graph.Edge), + to: make(map[int64]map[int64]graph.Edge), + + nodeIDs: uid.NewSet(), + } +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *DirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *DirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.from[n.ID()] = make(map[int64]graph.Edge) + g.to[n.ID()] = make(map[int64]graph.Edge) + g.nodeIDs.Use(n.ID()) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *DirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.from[id] { + delete(g.to[from], id) + } + delete(g.from, id) + + for to := range g.to[id] { + delete(g.from[to], id) + } + delete(g.to, id) + + g.nodeIDs.Release(id) +} + +// NewEdge returns a new Edge from the source to the destination node. +func (g *DirectedGraph) NewEdge(from, to graph.Node) graph.Edge { + return &Edge{F: from, T: to} +} + +// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *DirectedGraph) SetEdge(e graph.Edge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if !g.Has(fid) { + g.AddNode(from) + } + if !g.Has(tid) { + g.AddNode(to) + } + + g.from[fid][tid] = e + g.to[tid][fid] = e +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *DirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.from[fid], tid) + delete(g.to[tid], fid) +} + +// Node returns the node in the graph with the given ID. +func (g *DirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Has returns whether the node exists within the graph. +func (g *DirectedGraph) Has(id int64) bool { + _, ok := g.nodes[id] + return ok +} + +// Nodes returns all the nodes in the graph. +func (g *DirectedGraph) Nodes() []graph.Node { + if len(g.nodes) == 0 { + return nil + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return nodes +} + +// Edges returns all the edges in the graph. +func (g *DirectedGraph) Edges() []graph.Edge { + var edges []graph.Edge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + return edges +} + +// From returns all nodes in g that can be reached directly from n. 
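+//
+// Note the direction (editor's sketch): after g.SetEdge(g.NewEdge(u, v)),
+//
+//  g.From(u.ID()) // contains v
+//  g.To(v.ID())   // contains u
+//  g.From(v.ID()) // does not contain u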
+func (g *DirectedGraph) From(id int64) []graph.Node { + if _, ok := g.from[id]; !ok { + return nil + } + + from := make([]graph.Node, len(g.from[id])) + i := 0 + for vid := range g.from[id] { + from[i] = g.nodes[vid] + i++ + } + return from +} + +// To returns all nodes in g that can reach directly to n. +func (g *DirectedGraph) To(id int64) []graph.Node { + if _, ok := g.from[id]; !ok { + return nil + } + + to := make([]graph.Node, len(g.to[id])) + i := 0 + for uid := range g.to[id] { + to[i] = g.nodes[uid] + i++ + } + return to +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *DirectedGraph) HasEdgeBetween(xid, yid int64) bool { + if _, ok := g.from[xid][yid]; ok { + return true + } + _, ok := g.from[yid][xid] + return ok +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedGraph) Edge(uid, vid int64) graph.Edge { + edge, ok := g.from[uid][vid] + if !ok { + return nil + } + return edge +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *DirectedGraph) HasEdgeFromTo(uid, vid int64) bool { + if _, ok := g.from[uid][vid]; !ok { + return false + } + return true +} + +// Degree returns the in+out degree of n in g. +func (g *DirectedGraph) Degree(id int64) int { + if _, ok := g.nodes[id]; !ok { + return 0 + } + return len(g.from[id]) + len(g.to[id]) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/doc.go new file mode 100644 index 00000000..9fbfb422 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/doc.go @@ -0,0 +1,7 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package simple provides a suite of simple graph implementations satisfying +// the gonum/graph interfaces. +package simple diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/simple.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/simple.go new file mode 100644 index 00000000..a9ddfd5f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/simple.go @@ -0,0 +1,51 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "math" + + "gonum.org/v1/gonum/graph" +) + +// Node is a simple graph node. +type Node int64 + +// ID returns the ID number of the node. +func (n Node) ID() int64 { + return int64(n) +} + +// Edge is a simple graph edge. +type Edge struct { + F, T graph.Node +} + +// From returns the from-node of the edge. +func (e Edge) From() graph.Node { return e.F } + +// To returns the to-node of the edge. +func (e Edge) To() graph.Node { return e.T } + +// WeightedEdge is a simple weighted graph edge. +type WeightedEdge struct { + F, T graph.Node + W float64 +} + +// From returns the from-node of the edge. +func (e WeightedEdge) From() graph.Node { return e.F } + +// To returns the to-node of the edge. +func (e WeightedEdge) To() graph.Node { return e.T } + +// Weight returns the weight of the edge. 
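+//
+// For illustration (editor's addition):
+//
+//  e := WeightedEdge{F: Node(0), T: Node(1), W: 2.5}
+//  w := e.Weight() // w == 2.5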
+func (e WeightedEdge) Weight() float64 { return e.W } + +// isSame returns whether two float64 values are the same where NaN values +// are equalable. +func isSame(a, b float64) bool { + return a == b || (math.IsNaN(a) && math.IsNaN(b)) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/undirected.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/undirected.go new file mode 100644 index 00000000..b13d6d48 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/undirected.go @@ -0,0 +1,203 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" +) + +// UndirectedGraph implements a generalized undirected graph. +type UndirectedGraph struct { + nodes map[int64]graph.Node + edges map[int64]map[int64]graph.Edge + + nodeIDs uid.Set +} + +// NewUndirectedGraph returns an UndirectedGraph. +func NewUndirectedGraph() *UndirectedGraph { + return &UndirectedGraph{ + nodes: make(map[int64]graph.Node), + edges: make(map[int64]map[int64]graph.Edge), + + nodeIDs: uid.NewSet(), + } +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *UndirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *UndirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.edges[n.ID()] = make(map[int64]graph.Edge) + g.nodeIDs.Use(n.ID()) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *UndirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.edges[id] { + delete(g.edges[from], id) + } + delete(g.edges, id) + + g.nodeIDs.Release(id) +} + +// NewEdge returns a new Edge from the source to the destination node. +func (g *UndirectedGraph) NewEdge(from, to graph.Node) graph.Edge { + return &Edge{F: from, T: to} +} + +// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *UndirectedGraph) SetEdge(e graph.Edge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if !g.Has(fid) { + g.AddNode(from) + } + if !g.Has(tid) { + g.AddNode(to) + } + + g.edges[fid][tid] = e + g.edges[tid][fid] = e +} + +// RemoveEdge removes the edge with the given end IDs from the graph, leaving the terminal nodes. +// If the edge does not exist it is a no-op. +func (g *UndirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.edges[fid], tid) + delete(g.edges[tid], fid) +} + +// Node returns the node in the graph with the given ID. 
+func (g *UndirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Has returns whether the node exists within the graph. +func (g *UndirectedGraph) Has(id int64) bool { + _, ok := g.nodes[id] + return ok +} + +// Nodes returns all the nodes in the graph. +func (g *UndirectedGraph) Nodes() []graph.Node { + if len(g.nodes) == 0 { + return nil + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return nodes +} + +// Edges returns all the edges in the graph. +func (g *UndirectedGraph) Edges() []graph.Edge { + if len(g.edges) == 0 { + return nil + } + var edges []graph.Edge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + return edges +} + +// From returns all nodes in g that can be reached directly from n. +func (g *UndirectedGraph) From(id int64) []graph.Node { + if !g.Has(id) { + return nil + } + + nodes := make([]graph.Node, len(g.edges[id])) + i := 0 + for from := range g.edges[id] { + nodes[i] = g.nodes[from] + i++ + } + return nodes +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *UndirectedGraph) HasEdgeBetween(xid, yid int64) bool { + _, ok := g.edges[xid][yid] + return ok +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.EdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. +func (g *UndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { + edge, ok := g.edges[xid][yid] + if !ok { + return nil + } + return edge +} + +// Degree returns the degree of n in g. +func (g *UndirectedGraph) Degree(id int64) int { + if _, ok := g.nodes[id]; !ok { + return 0 + } + return len(g.edges[id]) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go new file mode 100644 index 00000000..591f9a51 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go @@ -0,0 +1,261 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" +) + +// WeightedDirectedGraph implements a generalized weighted directed graph. +type WeightedDirectedGraph struct { + nodes map[int64]graph.Node + from map[int64]map[int64]graph.WeightedEdge + to map[int64]map[int64]graph.WeightedEdge + + self, absent float64 + + nodeIDs uid.Set +} + +// NewWeightedDirectedGraph returns a WeightedDirectedGraph with the specified self and absent +// edge weight values. +func NewWeightedDirectedGraph(self, absent float64) *WeightedDirectedGraph { + return &WeightedDirectedGraph{ + nodes: make(map[int64]graph.Node), + from: make(map[int64]map[int64]graph.WeightedEdge), + to: make(map[int64]map[int64]graph.WeightedEdge), + + self: self, + absent: absent, + + nodeIDs: uid.NewSet(), + } +} + +// NewNode returns a new unique Node to be added to g. 
The Node's ID does +// not become valid in g until the Node is added to g. +func (g *WeightedDirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *WeightedDirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.from[n.ID()] = make(map[int64]graph.WeightedEdge) + g.to[n.ID()] = make(map[int64]graph.WeightedEdge) + g.nodeIDs.Use(n.ID()) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *WeightedDirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.from[id] { + delete(g.to[from], id) + } + delete(g.from, id) + + for to := range g.to[id] { + delete(g.from[to], id) + } + delete(g.to, id) + + g.nodeIDs.Release(id) +} + +// NewWeightedEdge returns a new weighted edge from the source to the destination node. +func (g *WeightedDirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { + return &WeightedEdge{F: from, T: to, W: weight} +} + +// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *WeightedDirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if !g.Has(fid) { + g.AddNode(from) + } + if !g.Has(tid) { + g.AddNode(to) + } + + g.from[fid][tid] = e + g.to[tid][fid] = e +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *WeightedDirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.from[fid], tid) + delete(g.to[tid], fid) +} + +// Node returns the node in the graph with the given ID. +func (g *WeightedDirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Has returns whether the node exists within the graph. +func (g *WeightedDirectedGraph) Has(id int64) bool { + _, ok := g.nodes[id] + return ok +} + +// Nodes returns all the nodes in the graph. +func (g *WeightedDirectedGraph) Nodes() []graph.Node { + if len(g.from) == 0 { + return nil + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return nodes +} + +// Edges returns all the edges in the graph. +func (g *WeightedDirectedGraph) Edges() []graph.Edge { + var edges []graph.Edge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + return edges +} + +// WeightedEdges returns all the weighted edges in the graph. +func (g *WeightedDirectedGraph) WeightedEdges() []graph.WeightedEdge { + var edges []graph.WeightedEdge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + return edges +} + +// From returns all nodes in g that can be reached directly from n. 
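+//
+// A short sketch (editor's addition, not upstream source):
+//
+//  g := NewWeightedDirectedGraph(0, math.Inf(1))
+//  g.SetWeightedEdge(g.NewWeightedEdge(Node(0), Node(1), 2.5))
+//  ns := g.From(0)         // [Node(1)]
+//  w, ok := g.Weight(1, 0) // +Inf (absent), false: the edge is directed.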
+func (g *WeightedDirectedGraph) From(id int64) []graph.Node { + if _, ok := g.from[id]; !ok { + return nil + } + + from := make([]graph.Node, len(g.from[id])) + i := 0 + for vid := range g.from[id] { + from[i] = g.nodes[vid] + i++ + } + return from +} + +// To returns all nodes in g that can reach directly to n. +func (g *WeightedDirectedGraph) To(id int64) []graph.Node { + if _, ok := g.from[id]; !ok { + return nil + } + + to := make([]graph.Node, len(g.to[id])) + i := 0 + for uid := range g.to[id] { + to[i] = g.nodes[uid] + i++ + } + return to +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *WeightedDirectedGraph) HasEdgeBetween(xid, yid int64) bool { + if _, ok := g.from[xid][yid]; ok { + return true + } + _, ok := g.from[yid][xid] + return ok +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedDirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdge(uid, vid) +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedDirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { + edge, ok := g.from[uid][vid] + if !ok { + return nil + } + return edge +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *WeightedDirectedGraph) HasEdgeFromTo(uid, vid int64) bool { + if _, ok := g.from[uid][vid]; !ok { + return false + } + return true +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *WeightedDirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if to, ok := g.from[xid]; ok { + if e, ok := to[yid]; ok { + return e.Weight(), true + } + } + return g.absent, false +} + +// Degree returns the in+out degree of n in g. +func (g *WeightedDirectedGraph) Degree(id int64) int { + if _, ok := g.nodes[id]; !ok { + return 0 + } + return len(g.from[id]) + len(g.to[id]) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go new file mode 100644 index 00000000..525de1ec --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go @@ -0,0 +1,255 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" +) + +// WeightedUndirectedGraph implements a generalized weighted undirected graph. +type WeightedUndirectedGraph struct { + nodes map[int64]graph.Node + edges map[int64]map[int64]graph.WeightedEdge + + self, absent float64 + + nodeIDs uid.Set +} + +// NewWeightedUndirectedGraph returns a WeightedUndirectedGraph with the specified self and absent +// edge weight values.
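+//
+// Sketch of the self and absent semantics (editor's addition, not upstream source):
+//
+//  g := NewWeightedUndirectedGraph(0, math.Inf(1))
+//  g.SetWeightedEdge(g.NewWeightedEdge(Node(0), Node(1), 1.5))
+//  w, ok := g.Weight(0, 0) // 0 (self), true
+//  w, ok = g.Weight(0, 1)  // 1.5, true
+//  w, ok = g.Weight(0, 2)  // +Inf (absent), false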
+func NewWeightedUndirectedGraph(self, absent float64) *WeightedUndirectedGraph { + return &WeightedUndirectedGraph{ + nodes: make(map[int64]graph.Node), + edges: make(map[int64]map[int64]graph.WeightedEdge), + + self: self, + absent: absent, + + nodeIDs: uid.NewSet(), + } +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *WeightedUndirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *WeightedUndirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.edges[n.ID()] = make(map[int64]graph.WeightedEdge) + g.nodeIDs.Use(n.ID()) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *WeightedUndirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.edges[id] { + delete(g.edges[from], id) + } + delete(g.edges, id) + + g.nodeIDs.Release(id) +} + +// NewWeightedEdge returns a new weighted edge from the source to the destination node. +func (g *WeightedUndirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { + return &WeightedEdge{F: from, T: to, W: weight} +} + +// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *WeightedUndirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if !g.Has(fid) { + g.AddNode(from) + } + if !g.Has(tid) { + g.AddNode(to) + } + + g.edges[fid][tid] = e + g.edges[tid][fid] = e +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *WeightedUndirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.edges[fid], tid) + delete(g.edges[tid], fid) +} + +// Node returns the node in the graph with the given ID. +func (g *WeightedUndirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Has returns whether the node exists within the graph. +func (g *WeightedUndirectedGraph) Has(id int64) bool { + _, ok := g.nodes[id] + return ok +} + +// Nodes returns all the nodes in the graph. +func (g *WeightedUndirectedGraph) Nodes() []graph.Node { + if len(g.nodes) == 0 { + return nil + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return nodes +} + +// Edges returns all the edges in the graph. 
+func (g *WeightedUndirectedGraph) Edges() []graph.Edge { + if len(g.edges) == 0 { + return nil + } + var edges []graph.Edge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + return edges +} + +// WeightedEdges returns all the weighted edges in the graph. +func (g *WeightedUndirectedGraph) WeightedEdges() []graph.WeightedEdge { + var edges []graph.WeightedEdge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + return edges +} + +// From returns all nodes in g that can be reached directly from n. +func (g *WeightedUndirectedGraph) From(id int64) []graph.Node { + if !g.Has(id) { + return nil + } + + nodes := make([]graph.Node, len(g.edges[id])) + i := 0 + for from := range g.edges[id] { + nodes[i] = g.nodes[from] + i++ + } + return nodes +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *WeightedUndirectedGraph) HasEdgeBetween(xid, yid int64) bool { + _, ok := g.edges[xid][yid] + return ok +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedUndirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedUndirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. +func (g *WeightedUndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { + return g.WeightedEdgeBetween(xid, yid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. +func (g *WeightedUndirectedGraph) WeightedEdgeBetween(xid, yid int64) graph.WeightedEdge { + edge, ok := g.edges[xid][yid] + if !ok { + return nil + } + return edge +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *WeightedUndirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if n, ok := g.edges[xid]; ok { + if e, ok := n[yid]; ok { + return e.Weight(), true + } + } + return g.absent, false +} + +// Degree returns the degree of n in g. 
+func (g *WeightedUndirectedGraph) Degree(id int64) int { + if _, ok := g.nodes[id]; !ok { + return 0 + } + return len(g.edges[id]) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go new file mode 100644 index 00000000..c602b13a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go @@ -0,0 +1,250 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// DegeneracyOrdering returns the degeneracy ordering and the k-cores of +// the undirected graph g. +func DegeneracyOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) { + order, offsets := degeneracyOrdering(g) + + ordered.Reverse(order) + cores = make([][]graph.Node, len(offsets)) + offset := len(order) + for i, n := range offsets { + cores[i] = order[offset-n : offset] + offset -= n + } + return order, cores +} + +// KCore returns the k-core of the undirected graph g with nodes in an +// optimal ordering for the coloring number. +func KCore(k int, g graph.Undirected) []graph.Node { + order, offsets := degeneracyOrdering(g) + + var offset int + for _, n := range offsets[:k] { + offset += n + } + core := make([]graph.Node, len(order)-offset) + copy(core, order[offset:]) + return core +} + +// degeneracyOrdering is the common code for DegeneracyOrdering and KCore. It +// returns l, the nodes of g in optimal ordering for coloring number and +// s, a set of relative offsets into l for each k-core, where k is an index +// into s. +func degeneracyOrdering(g graph.Undirected) (l []graph.Node, s []int) { + nodes := g.Nodes() + + // The algorithm used here is essentially as described at + // http://en.wikipedia.org/w/index.php?title=Degeneracy_%28graph_theory%29&oldid=640308710 + + // Initialize an output list L in return parameters. + + // Compute a number d_v for each vertex v in G, + // the number of neighbors of v that are not already in L. + // Initially, these numbers are just the degrees of the vertices. + dv := make(map[int64]int, len(nodes)) + var ( + maxDegree int + neighbours = make(map[int64][]graph.Node) + ) + for _, n := range nodes { + id := n.ID() + adj := g.From(id) + neighbours[id] = adj + dv[id] = len(adj) + if len(adj) > maxDegree { + maxDegree = len(adj) + } + } + + // Initialize an array D such that D[i] contains a list of the + // vertices v that are not already in L for which d_v = i. + d := make([][]graph.Node, maxDegree+1) + for _, n := range nodes { + deg := dv[n.ID()] + d[deg] = append(d[deg], n) + } + + // Initialize k to 0. + k := 0 + // Repeat n times: + s = []int{0} + for range nodes { + // Scan the array cells D[0], D[1], ... until + // finding an i for which D[i] is nonempty. + var ( + i int + di []graph.Node + ) + for i, di = range d { + if len(di) != 0 { + break + } + } + + // Set k to max(k,i). + if i > k { + k = i + s = append(s, make([]int, k-len(s)+1)...) + } + + // Select a vertex v from D[i]. Add v to the + // beginning of L and remove it from D[i]. 
+ var v graph.Node + v, d[i] = di[len(di)-1], di[:len(di)-1] + l = append(l, v) + s[k]++ + delete(dv, v.ID()) + + // For each neighbor w of v not already in L, + // subtract one from d_w and move w to the + // cell of D corresponding to the new value of d_w. + for _, w := range neighbours[v.ID()] { + dw, ok := dv[w.ID()] + if !ok { + continue + } + for i, n := range d[dw] { + if n.ID() == w.ID() { + d[dw][i], d[dw] = d[dw][len(d[dw])-1], d[dw][:len(d[dw])-1] + dw-- + d[dw] = append(d[dw], w) + break + } + } + dv[w.ID()] = dw + } + } + + return l, s +} + +// BronKerbosch returns the set of maximal cliques of the undirected graph g. +func BronKerbosch(g graph.Undirected) [][]graph.Node { + nodes := g.Nodes() + + // The algorithm used here is essentially BronKerbosch3 as described at + // http://en.wikipedia.org/w/index.php?title=Bron%E2%80%93Kerbosch_algorithm&oldid=656805858 + + p := make(set.Nodes, len(nodes)) + for _, n := range nodes { + p.Add(n) + } + x := make(set.Nodes) + var bk bronKerbosch + order, _ := degeneracyOrdering(g) + ordered.Reverse(order) + for _, v := range order { + neighbours := g.From(v.ID()) + nv := make(set.Nodes, len(neighbours)) + for _, n := range neighbours { + nv.Add(n) + } + bk.maximalCliquePivot(g, []graph.Node{v}, make(set.Nodes).Intersect(p, nv), make(set.Nodes).Intersect(x, nv)) + p.Remove(v) + x.Add(v) + } + return bk +} + +type bronKerbosch [][]graph.Node + +func (bk *bronKerbosch) maximalCliquePivot(g graph.Undirected, r []graph.Node, p, x set.Nodes) { + if len(p) == 0 && len(x) == 0 { + *bk = append(*bk, r) + return + } + + neighbours := bk.choosePivotFrom(g, p, x) + nu := make(set.Nodes, len(neighbours)) + for _, n := range neighbours { + nu.Add(n) + } + for _, v := range p { + if nu.Has(v) { + continue + } + vid := v.ID() + neighbours := g.From(vid) + nv := make(set.Nodes, len(neighbours)) + for _, n := range neighbours { + nv.Add(n) + } + + var found bool + for _, n := range r { + if n.ID() == vid { + found = true + break + } + } + var sr []graph.Node + if !found { + sr = append(r[:len(r):len(r)], v) + } + + bk.maximalCliquePivot(g, sr, make(set.Nodes).Intersect(p, nv), make(set.Nodes).Intersect(x, nv)) + p.Remove(v) + x.Add(v) + } +} + +func (*bronKerbosch) choosePivotFrom(g graph.Undirected, p, x set.Nodes) (neighbors []graph.Node) { + // TODO(kortschak): Investigate the impact of pivot choice that maximises + // |p ⋂ neighbours(u)| as a function of input size. Until then, leave as + // compile time option. + if !tomitaTanakaTakahashi { + for _, n := range p { + return g.From(n.ID()) + } + for _, n := range x { + return g.From(n.ID()) + } + panic("bronKerbosch: empty set") + } + + var ( + max = -1 + pivot graph.Node + ) + maxNeighbors := func(s set.Nodes) { + outer: + for _, u := range s { + nb := g.From(u.ID()) + c := len(nb) + if c <= max { + continue + } + for _, n := range nb { + if _, ok := p[n.ID()]; ok { + continue + } + c-- + if c <= max { + continue outer + } + } + max = c + pivot = u + neighbors = nb + } + } + maxNeighbors(p) + maxNeighbors(x) + if pivot == nil { + panic("bronKerbosch: empty set") + } + return neighbors +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go new file mode 100644 index 00000000..60ec75f0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go @@ -0,0 +1,106 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// Builder is a pure topological graph construction type. +type Builder interface { + AddNode(graph.Node) + SetEdge(graph.Edge) +} + +// CliqueGraph builds the clique graph of g in dst using Clique and CliqueGraphEdge +// nodes and edges. The nodes returned by calls to Nodes on the nodes and edges of +// the constructed graph are the cliques and the common nodes between cliques +// respectively. The dst graph is not cleared. +func CliqueGraph(dst Builder, g graph.Undirected) { + cliques := BronKerbosch(g) + + // Construct a consistent view of cliques in g. Sorting costs + // us a little, but not as much as the cliques themselves. + for _, c := range cliques { + sort.Sort(ordered.ByID(c)) + } + sort.Sort(ordered.BySliceIDs(cliques)) + + cliqueNodes := make(cliqueNodeSets, len(cliques)) + for id, c := range cliques { + s := make(set.Nodes, len(c)) + for _, n := range c { + s.Add(n) + } + ns := &nodeSet{Clique: Clique{id: int64(id), nodes: c}, nodes: s} + dst.AddNode(ns.Clique) + for _, n := range c { + nid := n.ID() + cliqueNodes[nid] = append(cliqueNodes[nid], ns) + } + } + + for _, cliques := range cliqueNodes { + for i, uc := range cliques { + for _, vc := range cliques[i+1:] { + // Retain the nodes that contribute to the + // edge between the cliques. + var edgeNodes []graph.Node + switch 1 { + case len(uc.Clique.nodes): + edgeNodes = []graph.Node{uc.Clique.nodes[0]} + case len(vc.Clique.nodes): + edgeNodes = []graph.Node{vc.Clique.nodes[0]} + default: + for _, n := range make(set.Nodes).Intersect(uc.nodes, vc.nodes) { + edgeNodes = append(edgeNodes, n) + } + sort.Sort(ordered.ByID(edgeNodes)) + } + + dst.SetEdge(CliqueGraphEdge{from: uc.Clique, to: vc.Clique, nodes: edgeNodes}) + } + } + } +} + +type cliqueNodeSets map[int64][]*nodeSet + +type nodeSet struct { + Clique + nodes set.Nodes +} + +// Clique is a node in a clique graph. +type Clique struct { + id int64 + nodes []graph.Node +} + +// ID returns the node ID. +func (n Clique) ID() int64 { return n.id } + +// Nodes returns the nodes in the clique. +func (n Clique) Nodes() []graph.Node { return n.nodes } + +// CliqueGraphEdge is an edge in a clique graph. +type CliqueGraphEdge struct { + from, to Clique + nodes []graph.Node +} + +// From returns the from node of the edge. +func (e CliqueGraphEdge) From() graph.Node { return e.from } + +// To returns the to node of the edge. +func (e CliqueGraphEdge) To() graph.Node { return e.to } + +// Nodes returns the common nodes in the cliques of the underlying graph +// corresponding to the from and to nodes in the clique graph. +func (e CliqueGraphEdge) Nodes() []graph.Node { return e.nodes } diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/doc.go new file mode 100644 index 00000000..4418a939 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package topo provides graph topology analysis functions. 
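+//
+// For example (editor's sketch, not upstream source), a topological sort of
+// any graph.Directed:
+//
+//  sorted, err := topo.Sort(g)
+//  if err != nil {
+//          // err is an Unorderable listing the cyclic components of g.
+//  }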
+package topo diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go new file mode 100644 index 00000000..8122a57e --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go @@ -0,0 +1,281 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// johnson implements Johnson's "Finding all the elementary +// circuits of a directed graph" algorithm. SIAM J. Comput. 4(1):1975. +// +// Comments in the johnson methods are kept in sync with the comments +// and labels from the paper. +type johnson struct { + adjacent johnsonGraph // SCC adjacency list. + b []set.Ints // Johnson's "B-list". + blocked []bool + s int + + stack []graph.Node + + result [][]graph.Node +} + +// DirectedCyclesIn returns the set of elementary cycles in the graph g. +func DirectedCyclesIn(g graph.Directed) [][]graph.Node { + jg := johnsonGraphFrom(g) + j := johnson{ + adjacent: jg, + b: make([]set.Ints, len(jg.orig)), + blocked: make([]bool, len(jg.orig)), + } + + // len(j.nodes) is the order of g. + for j.s < len(j.adjacent.orig)-1 { + // We use the previous SCC adjacency to reduce the work needed. + sccs := TarjanSCC(j.adjacent.subgraph(j.s)) + // A_k = adjacency structure of strong component K with least + // vertex in subgraph of G induced by {s, s+1, ... ,n}. + j.adjacent = j.adjacent.sccSubGraph(sccs, 2) // Only allow SCCs with >= 2 vertices. + if j.adjacent.order() == 0 { + break + } + + // s = least vertex in V_k + if s := j.adjacent.leastVertexIndex(); s < j.s { + j.s = s + } + for i, v := range j.adjacent.orig { + if !j.adjacent.nodes.Has(v.ID()) { + continue + } + if len(j.adjacent.succ[v.ID()]) > 0 { + j.blocked[i] = false + j.b[i] = make(set.Ints) + } + } + //L3: + _ = j.circuit(j.s) + j.s++ + } + + return j.result +} + +// circuit is the CIRCUIT sub-procedure in the paper. +func (j *johnson) circuit(v int) bool { + f := false + n := j.adjacent.orig[v] + j.stack = append(j.stack, n) + j.blocked[v] = true + + //L1: + for w := range j.adjacent.succ[n.ID()] { + w := j.adjacent.indexOf(w) + if w == j.s { + // Output circuit composed of stack followed by s. + r := make([]graph.Node, len(j.stack)+1) + copy(r, j.stack) + r[len(r)-1] = j.adjacent.orig[j.s] + j.result = append(j.result, r) + f = true + } else if !j.blocked[w] { + if j.circuit(w) { + f = true + } + } + } + + //L2: + if f { + j.unblock(v) + } else { + for w := range j.adjacent.succ[n.ID()] { + j.b[j.adjacent.indexOf(w)].Add(v) + } + } + j.stack = j.stack[:len(j.stack)-1] + + return f +} + +// unblock is the UNBLOCK sub-procedure in the paper. +func (j *johnson) unblock(u int) { + j.blocked[u] = false + for w := range j.b[u] { + j.b[u].Remove(w) + if j.blocked[w] { + j.unblock(w) + } + } +} + +// johnsonGraph is an edge list representation of a graph with helpers +// necessary for Johnson's algorithm. +type johnsonGraph struct { + // Keep the original graph nodes and a + // look-up into the non-sparse + // collection of potentially sparse IDs. + orig []graph.Node + index map[int64]int + + nodes set.Int64s + succ map[int64]set.Int64s +} + +// johnsonGraphFrom returns a deep copy of the graph g.
+func johnsonGraphFrom(g graph.Directed) johnsonGraph { + nodes := g.Nodes() + sort.Sort(ordered.ByID(nodes)) + c := johnsonGraph{ + orig: nodes, + index: make(map[int64]int, len(nodes)), + + nodes: make(set.Int64s, len(nodes)), + succ: make(map[int64]set.Int64s), + } + for i, u := range nodes { + uid := u.ID() + c.index[uid] = i + for _, v := range g.From(uid) { + if c.succ[uid] == nil { + c.succ[uid] = make(set.Int64s) + c.nodes.Add(uid) + } + c.nodes.Add(v.ID()) + c.succ[uid].Add(v.ID()) + } + } + return c +} + +// order returns the order of the graph. +func (g johnsonGraph) order() int { return g.nodes.Count() } + +// indexOf returns the index of the retained node for the given node ID. +func (g johnsonGraph) indexOf(id int64) int { + return g.index[id] +} + +// leastVertexIndex returns the index into orig of the least vertex. +func (g johnsonGraph) leastVertexIndex() int { + for _, v := range g.orig { + if g.nodes.Has(v.ID()) { + return g.indexOf(v.ID()) + } + } + panic("johnsonCycles: empty set") +} + +// subgraph returns a subgraph of g induced by {s, s+1, ... , n}. The +// subgraph is destructively generated in g. +func (g johnsonGraph) subgraph(s int) johnsonGraph { + sn := g.orig[s].ID() + for u, e := range g.succ { + if u < sn { + g.nodes.Remove(u) + delete(g.succ, u) + continue + } + for v := range e { + if v < sn { + g.succ[u].Remove(v) + } + } + } + return g +} + +// sccSubGraph returns the graph of Tarjan's strongly connected +// components with each SCC containing at least min vertices. +// sccSubGraph returns a graph with an empty node set if there is +// no SCC with at least min members. +func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { + if len(g.nodes) == 0 { + g.nodes = nil + g.succ = nil + return g + } + sub := johnsonGraph{ + orig: g.orig, + index: g.index, + nodes: make(set.Int64s), + succ: make(map[int64]set.Int64s), + } + + var n int + for _, scc := range sccs { + if len(scc) < min { + continue + } + n++ + for _, u := range scc { + for _, v := range scc { + if _, ok := g.succ[u.ID()][v.ID()]; ok { + if sub.succ[u.ID()] == nil { + sub.succ[u.ID()] = make(set.Int64s) + sub.nodes.Add(u.ID()) + } + sub.nodes.Add(v.ID()) + sub.succ[u.ID()].Add(v.ID()) + } + } + } + } + if n == 0 { + g.nodes = nil + g.succ = nil + return g + } + + return sub +} + +// Nodes is required to satisfy Tarjan. +func (g johnsonGraph) Nodes() []graph.Node { + n := make([]graph.Node, 0, len(g.nodes)) + for id := range g.nodes { + n = append(n, johnsonGraphNode(id)) + } + return n +} + +// From is required to satisfy Tarjan.
+func (g johnsonGraph) From(id int64) []graph.Node { + adj := g.succ[id] + if len(adj) == 0 { + return nil + } + succ := make([]graph.Node, 0, len(adj)) + for id := range adj { + succ = append(succ, johnsonGraphNode(id)) + } + return succ +} + +func (johnsonGraph) Has(int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) HasEdgeBetween(_, _ int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) Edge(_, _ int64) graph.Edge { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) HasEdgeFromTo(_, _ int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) To(int64) []graph.Node { + panic("topo: unintended use of johnsonGraph") +} + +type johnsonGraphNode int64 + +func (n johnsonGraphNode) ID() int64 { return int64(n) } diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go new file mode 100644 index 00000000..36171d6f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go @@ -0,0 +1,9 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !tomita + +package topo + +const tomitaTanakaTakahashi = false diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go new file mode 100644 index 00000000..5d4d0751 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go @@ -0,0 +1,81 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/linear" + "gonum.org/v1/gonum/graph/internal/set" +) + +// UndirectedCyclesIn returns a set of cycles that forms a cycle basis in the graph g. +// Any cycle in g can be constructed as a symmetric difference of its elements. 
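+//
+// For intuition (editor's addition): in a square with one diagonal, the two
+// triangles form a basis; their symmetric difference drops the shared
+// diagonal and recovers the outer four-cycle.
+//
+//  cycles := UndirectedCyclesIn(g)
+//  // Apart from self loops, each basis cycle is returned as a closed
+//  // walk, with its first node repeated at the end.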
+func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { + // From "An algorithm for finding a fundamental set of cycles of a graph" + // https://doi.org/10.1145/363219.363232 + + var cycles [][]graph.Node + done := make(set.Int64s) + var tree linear.NodeStack + for _, n := range g.Nodes() { + id := n.ID() + if done.Has(id) { + continue + } + done.Add(id) + + tree = tree[:0] + tree.Push(n) + from := sets{id: set.Int64s{}} + to := map[int64]graph.Node{id: n} + + for tree.Len() != 0 { + u := tree.Pop() + uid := u.ID() + adj := from[uid] + for _, v := range g.From(uid) { + vid := v.ID() + switch { + case uid == vid: + cycles = append(cycles, []graph.Node{u}) + case !from.has(vid): + done.Add(vid) + to[vid] = u + tree.Push(v) + from.add(uid, vid) + case !adj.Has(vid): + c := []graph.Node{v, u} + adj := from[vid] + p := to[uid] + for !adj.Has(p.ID()) { + c = append(c, p) + p = to[p.ID()] + } + c = append(c, p, c[0]) + cycles = append(cycles, c) + adj.Add(uid) + } + } + } + } + + return cycles +} + +type sets map[int64]set.Int64s + +func (s sets) add(uid, vid int64) { + e, ok := s[vid] + if !ok { + e = make(set.Int64s) + s[vid] = e + } + e.Add(uid) +} + +func (s sets) has(uid int64) bool { + _, ok := s[uid] + return ok +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go new file mode 100644 index 00000000..28f50e24 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go @@ -0,0 +1,197 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "fmt" + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// Unorderable is an error containing sets of unorderable graph.Nodes. +type Unorderable [][]graph.Node + +// Error satisfies the error interface. +func (e Unorderable) Error() string { + const maxNodes = 10 + var n int + for _, c := range e { + n += len(c) + } + if n > maxNodes { + // Don't return errors that are too long. + return fmt.Sprintf("topo: no topological ordering: %d nodes in %d cyclic components", n, len(e)) + } + return fmt.Sprintf("topo: no topological ordering: cyclic components: %v", [][]graph.Node(e)) +} + +func lexical(nodes []graph.Node) { sort.Sort(ordered.ByID(nodes)) } + +// Sort performs a topological sort of the directed graph g returning the 'from' to 'to' +// sort order. If a topological ordering is not possible, an Unorderable error is returned +// listing cyclic components in g with each cyclic component's members sorted by ID. When +// an Unorderable error is returned, each cyclic component's topological position within +// the sorted nodes is marked with a nil graph.Node. +func Sort(g graph.Directed) (sorted []graph.Node, err error) { + sccs := TarjanSCC(g) + return sortedFrom(sccs, lexical) +} + +// SortStabilized performs a topological sort of the directed graph g returning the 'from' +// to 'to' sort order, or the order defined by the in place order sort function where there +// is no unambiguous topological ordering. If a topological ordering is not possible, an +// Unorderable error is returned listing cyclic components in g with each cyclic component's +// members sorted by the provided order function. If order is nil, nodes are ordered lexically +// by node ID. 
When an Unorderable error is returned, each cyclic component's topological +// position within the sorted nodes is marked with a nil graph.Node. +func SortStabilized(g graph.Directed, order func([]graph.Node)) (sorted []graph.Node, err error) { + if order == nil { + order = lexical + } + sccs := tarjanSCCstabilized(g, order) + return sortedFrom(sccs, order) +} + +func sortedFrom(sccs [][]graph.Node, order func([]graph.Node)) ([]graph.Node, error) { + sorted := make([]graph.Node, 0, len(sccs)) + var sc Unorderable + for _, s := range sccs { + if len(s) != 1 { + order(s) + sc = append(sc, s) + sorted = append(sorted, nil) + continue + } + sorted = append(sorted, s[0]) + } + var err error + if sc != nil { + for i, j := 0, len(sc)-1; i < j; i, j = i+1, j-1 { + sc[i], sc[j] = sc[j], sc[i] + } + err = sc + } + ordered.Reverse(sorted) + return sorted, err +} + +// TarjanSCC returns the strongly connected components of the graph g using Tarjan's algorithm. +// +// A strongly connected component of a graph is a set of vertices where it's possible to reach any +// vertex in the set from any other (meaning there's a cycle between them.) +// +// Generally speaking, a directed graph where the number of strongly connected components is equal +// to the number of nodes is acyclic, unless you count reflexive edges as a cycle (which requires +// only a little extra testing.) +// +func TarjanSCC(g graph.Directed) [][]graph.Node { + return tarjanSCCstabilized(g, nil) +} + +func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.Node { + nodes := g.Nodes() + var succ func(id int64) []graph.Node + if order == nil { + succ = g.From + } else { + order(nodes) + ordered.Reverse(nodes) + + succ = func(id int64) []graph.Node { + to := g.From(id) + order(to) + ordered.Reverse(to) + return to + } + } + + t := tarjan{ + succ: succ, + + indexTable: make(map[int64]int, len(nodes)), + lowLink: make(map[int64]int, len(nodes)), + onStack: make(set.Int64s), + } + for _, v := range nodes { + if t.indexTable[v.ID()] == 0 { + t.strongconnect(v) + } + } + return t.sccs +} + +// tarjan implements Tarjan's strongly connected component finding +// algorithm. The implementation is from the pseudocode at +// +// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm?oldid=642744644 +// +type tarjan struct { + succ func(id int64) []graph.Node + + index int + indexTable map[int64]int + lowLink map[int64]int + onStack set.Int64s + + stack []graph.Node + + sccs [][]graph.Node +} + +// strongconnect is the strongconnect function described in the +// wikipedia article. +func (t *tarjan) strongconnect(v graph.Node) { + vID := v.ID() + + // Set the depth index for v to the smallest unused index. + t.index++ + t.indexTable[vID] = t.index + t.lowLink[vID] = t.index + t.stack = append(t.stack, v) + t.onStack.Add(vID) + + // Consider successors of v. + for _, w := range t.succ(vID) { + wID := w.ID() + if t.indexTable[wID] == 0 { + // Successor w has not yet been visited; recur on it. + t.strongconnect(w) + t.lowLink[vID] = min(t.lowLink[vID], t.lowLink[wID]) + } else if t.onStack.Has(wID) { + // Successor w is in stack s and hence in the current SCC. + t.lowLink[vID] = min(t.lowLink[vID], t.indexTable[wID]) + } + } + + // If v is a root node, pop the stack and generate an SCC. + if t.lowLink[vID] == t.indexTable[vID] { + // Start a new strongly connected component. 
+ var ( + scc []graph.Node + w graph.Node + ) + for { + w, t.stack = t.stack[len(t.stack)-1], t.stack[:len(t.stack)-1] + t.onStack.Remove(w.ID()) + // Add w to current strongly connected component. + scc = append(scc, w) + if w.ID() == vID { + break + } + } + // Output the current strongly connected component. + t.sccs = append(t.sccs, scc) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go new file mode 100644 index 00000000..f85a0d6c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go @@ -0,0 +1,9 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build tomita + +package topo + +const tomitaTanakaTakahashi = true diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/topo.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/topo.go new file mode 100644 index 00000000..80945cf6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/topo/topo.go @@ -0,0 +1,68 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/traverse" +) + +// IsPathIn returns whether path is a path in g. +// +// As special cases, IsPathIn returns true for a zero length path or for +// a path of length 1 when the node in path exists in the graph. +func IsPathIn(g graph.Graph, path []graph.Node) bool { + switch len(path) { + case 0: + return true + case 1: + return g.Has(path[0].ID()) + default: + var canReach func(uid, vid int64) bool + switch g := g.(type) { + case graph.Directed: + canReach = g.HasEdgeFromTo + default: + canReach = g.HasEdgeBetween + } + + for i, u := range path[:len(path)-1] { + if !canReach(u.ID(), path[i+1].ID()) { + return false + } + } + return true + } +} + +// PathExistsIn returns whether there is a path in g starting at from extending +// to to. +// +// PathExistsIn exists as a helper function. If many tests for path existence +// are being performed, other approaches will be more efficient. +func PathExistsIn(g graph.Graph, from, to graph.Node) bool { + var t traverse.BreadthFirst + return t.Walk(g, from, func(n graph.Node, _ int) bool { return n.ID() == to.ID() }) != nil +} + +// ConnectedComponents returns the connected components of the undirected graph g. +func ConnectedComponents(g graph.Undirected) [][]graph.Node { + var ( + w traverse.DepthFirst + c []graph.Node + cc [][]graph.Node + ) + during := func(n graph.Node) { + c = append(c, n) + } + after := func() { + cc = append(cc, []graph.Node(nil)) + cc[len(cc)-1] = append(cc[len(cc)-1], c...) + c = c[:0] + } + w.WalkAll(g, nil, after, during) + + return cc +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/traverse/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/traverse/doc.go new file mode 100644 index 00000000..825b1f3a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/traverse/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package traverse provides basic graph traversal primitives.
+package traverse
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go
new file mode 100644
index 00000000..2f486bae
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go
@@ -0,0 +1,199 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package traverse
+
+import (
+	"gonum.org/v1/gonum/graph"
+	"gonum.org/v1/gonum/graph/internal/linear"
+	"gonum.org/v1/gonum/graph/internal/set"
+)
+
+var _ Graph = graph.Graph(nil)
+
+// Graph is the subset of graph.Graph necessary for graph traversal.
+type Graph interface {
+	// From returns all nodes that can be reached directly
+	// from the node with the given ID.
+	From(id int64) []graph.Node
+
+	// Edge returns the edge from u to v, with IDs uid and vid,
+	// if such an edge exists and nil otherwise. The node v
+	// must be directly reachable from u as defined by the
+	// From method.
+	Edge(uid, vid int64) graph.Edge
+}
+
+// BreadthFirst implements stateful breadth-first graph traversal.
+type BreadthFirst struct {
+	EdgeFilter func(graph.Edge) bool
+	Visit func(u, v graph.Node)
+	queue linear.NodeQueue
+	visited set.Int64s
+}
+
+// Walk performs a breadth-first traversal of the graph g starting from the given node,
+// depending on the EdgeFilter field and the until parameter if they are non-nil. The
+// traversal follows edges for which EdgeFilter(edge) is true and returns the first node
+// for which until(node, depth) is true. During the traversal, if the Visit field is
+// non-nil, it is called with the nodes joined by each followed edge.
+func (b *BreadthFirst) Walk(g Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node {
+	if b.visited == nil {
+		b.visited = make(set.Int64s)
+	}
+	b.queue.Enqueue(from)
+	b.visited.Add(from.ID())
+
+	var (
+		depth int
+		children int
+		untilNext = 1
+	)
+	for b.queue.Len() > 0 {
+		t := b.queue.Dequeue()
+		if until != nil && until(t, depth) {
+			return t
+		}
+		tid := t.ID()
+		for _, n := range g.From(tid) {
+			nid := n.ID()
+			if b.EdgeFilter != nil && !b.EdgeFilter(g.Edge(tid, nid)) {
+				continue
+			}
+			if b.visited.Has(nid) {
+				continue
+			}
+			if b.Visit != nil {
+				b.Visit(t, n)
+			}
+			b.visited.Add(nid)
+			children++
+			b.queue.Enqueue(n)
+		}
+		if untilNext--; untilNext == 0 {
+			depth++
+			untilNext = children
+			children = 0
+		}
+	}
+
+	return nil
+}
+
+// WalkAll calls Walk for each unvisited node of the graph g using edges independent
+// of their direction. The functions before and after are called prior to commencing
+// and after completing each walk if they are non-nil respectively. The function
+// during is called on each node as it is traversed.
+func (b *BreadthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) {
+	b.Reset()
+	for _, from := range g.Nodes() {
+		if b.Visited(from) {
+			continue
+		}
+		if before != nil {
+			before()
+		}
+		b.Walk(g, from, func(n graph.Node, _ int) bool {
+			if during != nil {
+				during(n)
+			}
+			return false
+		})
+		if after != nil {
+			after()
+		}
+	}
+}
+
+// Visited returns whether the node n was visited during a traverse.
+func (b *BreadthFirst) Visited(n graph.Node) bool {
+	return b.visited.Has(n.ID())
+}
+
+// Reset resets the state of the traverser for reuse.
+func (b *BreadthFirst) Reset() {
+	b.queue.Reset()
+	b.visited = nil
+}
+
+// DepthFirst implements stateful depth-first graph traversal.
+type DepthFirst struct {
+	EdgeFilter func(graph.Edge) bool
+	Visit func(u, v graph.Node)
+	stack linear.NodeStack
+	visited set.Int64s
+}
+
+// Walk performs a depth-first traversal of the graph g starting from the given node,
+// depending on the EdgeFilter field and the until parameter if they are non-nil. The
+// traversal follows edges for which EdgeFilter(edge) is true and returns the first node
+// for which until(node) is true. During the traversal, if the Visit field is non-nil, it
+// is called with the nodes joined by each followed edge.
+func (d *DepthFirst) Walk(g Graph, from graph.Node, until func(graph.Node) bool) graph.Node {
+	if d.visited == nil {
+		d.visited = make(set.Int64s)
+	}
+	d.stack.Push(from)
+	d.visited.Add(from.ID())
+
+	for d.stack.Len() > 0 {
+		t := d.stack.Pop()
+		if until != nil && until(t) {
+			return t
+		}
+		tid := t.ID()
+		for _, n := range g.From(tid) {
+			nid := n.ID()
+			if d.EdgeFilter != nil && !d.EdgeFilter(g.Edge(tid, nid)) {
+				continue
+			}
+			if d.visited.Has(nid) {
+				continue
+			}
+			if d.Visit != nil {
+				d.Visit(t, n)
+			}
+			d.visited.Add(nid)
+			d.stack.Push(n)
+		}
+	}
+
+	return nil
+}
+
+// WalkAll calls Walk for each unvisited node of the graph g using edges independent
+// of their direction. The functions before and after are called prior to commencing
+// and after completing each walk if they are non-nil respectively. The function
+// during is called on each node as it is traversed.
+func (d *DepthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) {
+	d.Reset()
+	for _, from := range g.Nodes() {
+		if d.Visited(from) {
+			continue
+		}
+		if before != nil {
+			before()
+		}
+		d.Walk(g, from, func(n graph.Node) bool {
+			if during != nil {
+				during(n)
+			}
+			return false
+		})
+		if after != nil {
+			after()
+		}
+	}
+}
+
+// Visited returns whether the node n was visited during a traverse.
+func (d *DepthFirst) Visited(n graph.Node) bool {
+	return d.visited.Has(n.ID())
+}
+
+// Reset resets the state of the traverser for reuse.
+func (d *DepthFirst) Reset() {
+	d.stack = d.stack[:0]
+	d.visited = nil
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/undirect.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/undirect.go
new file mode 100644
index 00000000..49ba62bd
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/graph/undirect.go
@@ -0,0 +1,226 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package graph
+
+// Undirect converts a directed graph to an undirected graph.
+type Undirect struct {
+	G Directed
+}
+
+var _ Undirected = Undirect{}
+
+// Has returns whether the node exists within the graph.
+func (g Undirect) Has(id int64) bool { return g.G.Has(id) }
+
+// Nodes returns all the nodes in the graph.
+func (g Undirect) Nodes() []Node { return g.G.Nodes() }
+
+// From returns all nodes in g that can be reached directly from u.
+func (g Undirect) From(uid int64) []Node { + var nodes []Node + seen := make(map[int64]struct{}) + for _, n := range g.G.From(uid) { + seen[n.ID()] = struct{}{} + nodes = append(nodes, n) + } + for _, n := range g.G.To(uid) { + id := n.ID() + if _, ok := seen[id]; ok { + continue + } + seen[n.ID()] = struct{}{} + nodes = append(nodes, n) + } + return nodes +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g Undirect) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g Undirect) Edge(uid, vid int64) Edge { return g.EdgeBetween(uid, vid) } + +// EdgeBetween returns the edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g Undirect) EdgeBetween(xid, yid int64) Edge { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + if fe == nil && re == nil { + return nil + } + + return EdgePair{fe, re} +} + +// UndirectWeighted converts a directed weighted graph to an undirected weighted graph, +// resolving edge weight conflicts. +type UndirectWeighted struct { + G WeightedDirected + + // Absent is the value used to + // represent absent edge weights + // passed to Merge if the reverse + // edge is present. + Absent float64 + + // Merge defines how discordant edge + // weights in G are resolved. A merge + // is performed if at least one edge + // exists between the nodes being + // considered. The edges corresponding + // to the two weights are also passed, + // in the same order. + // The order of weight parameters + // passed to Merge is not defined, so + // the function should be commutative. + // If Merge is nil, the arithmetic + // mean is used to merge weights. + Merge func(x, y float64, xe, ye Edge) float64 +} + +var ( + _ Undirected = UndirectWeighted{} + _ WeightedUndirected = UndirectWeighted{} +) + +// Has returns whether the node exists within the graph. +func (g UndirectWeighted) Has(id int64) bool { return g.G.Has(id) } + +// Nodes returns all the nodes in the graph. +func (g UndirectWeighted) Nodes() []Node { return g.G.Nodes() } + +// From returns all nodes in g that can be reached directly from u. +func (g UndirectWeighted) From(uid int64) []Node { + var nodes []Node + seen := make(map[int64]struct{}) + for _, n := range g.G.From(uid) { + seen[n.ID()] = struct{}{} + nodes = append(nodes, n) + } + for _, n := range g.G.To(uid) { + id := n.ID() + if _, ok := seen[id]; ok { + continue + } + seen[n.ID()] = struct{}{} + nodes = append(nodes, n) + } + return nodes +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g UndirectWeighted) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. 
+func (g UndirectWeighted) Edge(uid, vid int64) Edge { return g.WeightedEdgeBetween(uid, vid) } + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g UndirectWeighted) WeightedEdge(uid, vid int64) WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g UndirectWeighted) EdgeBetween(xid, yid int64) Edge { + return g.WeightedEdgeBetween(xid, yid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g UndirectWeighted) WeightedEdgeBetween(xid, yid int64) WeightedEdge { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + if fe == nil && re == nil { + return nil + } + + f, ok := g.G.Weight(xid, yid) + if !ok { + f = g.Absent + } + r, ok := g.G.Weight(yid, xid) + if !ok { + r = g.Absent + } + + var w float64 + if g.Merge == nil { + w = (f + r) / 2 + } else { + w = g.Merge(f, r, fe, re) + } + return WeightedEdgePair{EdgePair: [2]Edge{fe, re}, W: w} +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node the internal node weight is returned. If there is no joining +// edge between the two nodes the weight value returned is zero. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g UndirectWeighted) Weight(xid, yid int64) (w float64, ok bool) { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + + f, fOk := g.G.Weight(xid, yid) + if !fOk { + f = g.Absent + } + r, rOK := g.G.Weight(yid, xid) + if !rOK { + r = g.Absent + } + ok = fOk || rOK + + if g.Merge == nil { + return (f + r) / 2, ok + } + return g.Merge(f, r, fe, re), ok +} + +// EdgePair is an opposed pair of directed edges. +type EdgePair [2]Edge + +// From returns the from node of the first non-nil edge, or nil. +func (e EdgePair) From() Node { + if e[0] != nil { + return e[0].From() + } else if e[1] != nil { + return e[1].From() + } + return nil +} + +// To returns the to node of the first non-nil edge, or nil. +func (e EdgePair) To() Node { + if e[0] != nil { + return e[0].To() + } else if e[1] != nil { + return e[1].To() + } + return nil +} + +// WeightedEdgePair is an opposed pair of directed edges. +type WeightedEdgePair struct { + EdgePair + W float64 +} + +// Weight returns the merged edge weights of the two edges. +func (e WeightedEdgePair) Weight() float64 { return e.W } diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s new file mode 100644 index 00000000..0a4c14c2 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s @@ -0,0 +1,134 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
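Continuing the hypothetical adjGraph sketch from the topo example above, graph.Undirect composes with traverse.BreadthFirst to answer "how many hops apart, ignoring direction" in a few lines. hopsTo is our name for this helper, not a vendored API; it would live in the same file as the earlier sketch, with "gonum.org/v1/gonum/graph/traverse" added to the imports. Because EdgeFilter is left nil, Walk never consults the sketch graph's stub Edge method.

    // hopsTo returns the number of edges on a shortest undirected path
    // from "from" to "to", or -1 if no path exists.
    func hopsTo(g graph.Directed, from, to graph.Node) int {
        var bfs traverse.BreadthFirst
        depth := -1
        found := bfs.Walk(graph.Undirect{G: g}, from, func(n graph.Node, d int) bool {
            depth = d
            return n.ID() == to.ID()
        })
        if found == nil {
            return -1
        }
        return depth
    }

With the adjGraph built earlier, hopsTo(g, node(0), node(3)) reports 2 rather than 3: undirecting exposes the 2->0 edge as 0-2, so 0, 2, 3 is a valid path.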
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SI // SI = &x + MOVQ y_base+40(FP), DI // DI = &y + MOVQ n+64(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+88(FP), R8 // R8 = ix // Load the first index + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ iy+96(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + LEAQ (SI)(R8*1), SI // SI = &(x[ix]) + LEAQ (DI)(R9*1), DI // DI = &(y[iy]) + MOVQ DI, DX // DX = DI // Separate Read/Write pointers + MOVQ incX+72(FP), R8 // R8 = incX + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ incY+80(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SI)(R8*1), X4 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVUPS (SI), X6 + MOVUPS (SI)(R8*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + ADDPD (DX)(R9*1), X5 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDPD (DX), X7 + ADDPD (DX)(R9*1), X9 + MOVUPS X3, (DI) // dst[i] = X_(i+1) + MOVUPS X5, (DI)(R9*1) + LEAQ (DI)(R9*2), DI + MOVUPS X7, (DI) + MOVUPS X9, (DI)(R9*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + 
SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI), X3 + MOVUPS X3, (DI) // y[i] = X_i + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s new file mode 100644 index 00000000..cb57f4be --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s @@ -0,0 +1,141 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+56(FP), SI // SI = &x + MOVQ y_base+80(FP), DX // DX = &y + MOVQ n+104(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+128(FP), R8 // R8 = ix // Load the first index + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ iy+136(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVQ idst+32(FP), R10 // R10 = idst + SHLQ $4, R10 // R10 *= sizeof(complex128) + LEAQ (SI)(R8*1), SI // SI = &(x[ix]) + LEAQ (DX)(R9*1), DX // DX = &(y[iy]) + LEAQ (DI)(R10*1), DI // DI = &(dst[idst]) + MOVQ incX+112(FP), R8 // R8 = incX + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ incY+120(FP), R9 // R9 = incY + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVQ incDst+24(FP), R10 // R10 = incDst + SHLQ $4, R10 // R10 *= sizeof(complex128) + MOVUPS alpha+40(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SI)(R8*1), X4 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + + MOVUPS (SI), X6 + MOVUPS (SI)(R8*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + 
MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + ADDPD (DX)(R9*1), X5 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDPD (DX), X7 + ADDPD (DX)(R9*1), X9 + MOVUPS X3, (DI) // dst[i] = X_(i+1) + MOVUPS X5, (DI)(R10*1) + LEAQ (DI)(R10*2), DI + MOVUPS X7, (DI) + MOVUPS X9, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + MOVUPS X3, (DI) // y[i] X_(i+1) + ADDQ R8, SI // SI += incX + ADDQ R9, DX // DX += incY + ADDQ R10, DI // DI += incDst + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s new file mode 100644 index 00000000..f1fddce7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s @@ -0,0 +1,122 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
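The two strided kernels above, AxpyInc and AxpyIncTo, are easier to read as the pure-Go loops they replace. Below is a sketch in the style of the package's own fallback implementations; axpyIncRef and axpyIncToRef are our names for illustration, not the vendored functions.

    // axpyIncRef mirrors AxpyInc: y[iy+k*incY] += alpha * x[ix+k*incX]
    // for k in [0, n).
    func axpyIncRef(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) {
        for i := 0; i < int(n); i++ {
            y[iy] += alpha * x[ix]
            ix += incX
            iy += incY
        }
    }

    // axpyIncToRef mirrors AxpyIncTo: the same a*x + y kernel, but the
    // result is written to dst instead of back into y.
    func axpyIncToRef(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) {
        for i := 0; i < int(n); i++ {
            dst[idst] = alpha*x[ix] + y[iy]
            ix += incX
            iy += incY
            idst += incDst
        }
    }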
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitary(alpha complex128, x, y []complex128) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SI // SI = &x + MOVQ y_base+40(FP), DI // DI = &y + MOVQ x_len+24(FP), CX // CX = min( len(x), len(y) ) + CMPQ y_len+48(FP), CX + CMOVQLE y_len+48(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + PXOR X0, X0 // Clear work registers and cache-align loop + PXOR X1, X1 + MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SI)(AX*8), X4 + MOVUPS 32(SI)(AX*8), X6 + MOVUPS 48(SI)(AX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI)(AX*8), X3 + ADDPD 16(DI)(AX*8), X5 + ADDPD 32(DI)(AX*8), X7 + ADDPD 48(DI)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX + JNZ caxy_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI)(AX*8), X3 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + ADDQ $2, AX // i += 2 + LOOP caxy_tail // } while --CX > 0 + +caxy_end: 
+	RET
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s
new file mode 100644
index 00000000..b80015fd
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s
@@ -0,0 +1,123 @@
+// Copyright ©2016 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build !noasm,!appengine,!safe
+
+#include "textflag.h"
+
+// MOVDDUP X2, X3
+#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA
+// MOVDDUP X4, X5
+#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC
+// MOVDDUP X6, X7
+#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE
+// MOVDDUP X8, X9
+#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8
+
+// ADDSUBPD X2, X3
+#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
+// ADDSUBPD X4, X5
+#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
+// ADDSUBPD X6, X7
+#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
+// ADDSUBPD X8, X9
+#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
+
+// func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128)
+TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0
+	MOVQ dst_base+0(FP), DI // DI = &dst
+	MOVQ x_base+40(FP), SI // SI = &x
+	MOVQ y_base+64(FP), DX // DX = &y
+	MOVQ x_len+48(FP), CX // CX = min( len(x), len(y), len(dst) )
+	CMPQ y_len+72(FP), CX
+	CMOVQLE y_len+72(FP), CX
+	CMPQ dst_len+8(FP), CX
+	CMOVQLE dst_len+8(FP), CX
+	CMPQ CX, $0 // if CX == 0 { return }
+	JE caxy_end
+	MOVUPS alpha+24(FP), X0 // X0 = { imag(a), real(a) }
+	MOVAPS X0, X1
+	SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) }
+	XORQ AX, AX // i = 0
+	MOVAPS X0, X10 // Copy X0 and X1 for pipelining
+	MOVAPS X1, X11
+	MOVQ CX, BX
+	ANDQ $3, CX // CX = n % 4
+	SHRQ $2, BX // BX = floor( n / 4 )
+	JZ caxy_tail // if BX == 0 { goto caxy_tail }
+
+caxy_loop: // do {
+	MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) }
+	MOVUPS 16(SI)(AX*8), X4
+	MOVUPS 32(SI)(AX*8), X6
+	MOVUPS 48(SI)(AX*8), X8
+
+	// X_(i+1) = { real(x[i]), real(x[i]) }
+	MOVDDUP_X2_X3 // Load and duplicate real elements (xr, xr)
+	MOVDDUP_X4_X5
+	MOVDDUP_X6_X7
+	MOVDDUP_X8_X9
+
+	// X_i = { imag(x[i]), imag(x[i]) }
+	SHUFPD $0x3, X2, X2 // duplicate imag elements (xi, xi)
+	SHUFPD $0x3, X4, X4
+	SHUFPD $0x3, X6, X6
+	SHUFPD $0x3, X8, X8
+
+	// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
+	// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
+	MULPD X1, X2
+	MULPD X0, X3
+	MULPD X11, X4
+	MULPD X10, X5
+	MULPD X1, X6
+	MULPD X0, X7
+	MULPD X11, X8
+	MULPD X10, X9
+
+	// X_(i+1) = {
+	// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
+	// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
+	// }
+	ADDSUBPD_X2_X3
+	ADDSUBPD_X4_X5
+	ADDSUBPD_X6_X7
+	ADDSUBPD_X8_X9
+
+	// X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) }
+	ADDPD (DX)(AX*8), X3
+	ADDPD 16(DX)(AX*8), X5
+	ADDPD 32(DX)(AX*8), X7
+	ADDPD 48(DX)(AX*8), X9
+	MOVUPS X3, (DI)(AX*8) // dst[i] = X_(i+1)
+	MOVUPS X5, 16(DI)(AX*8)
+	MOVUPS X7, 32(DI)(AX*8)
+	MOVUPS X9, 48(DI)(AX*8)
+	ADDQ $8, AX // i += 8
+	DECQ BX
+	JNZ caxy_loop // } while --BX > 0
+	CMPQ CX, $0 // if CX == 0 { return }
+	JE caxy_end
+
+caxy_tail: // Same calculation, but
read in values to avoid trampling memory + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX)(AX*8), X3 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + ADDQ $2, AX // i += 2 + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go new file mode 100644 index 00000000..7bb5691d --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package c128 provides complex128 vector primitives. +package c128 diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s new file mode 100644 index 00000000..301d294f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s @@ -0,0 +1,153 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
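All four Axpy kernels above expand alpha*x[i] the same way: the MOVDDUP, SHUFPD, MULPD, ADDSUBPD sequence is just the schoolbook complex product spelled out in the block comments, computed four elements per iteration. A hedged pure-Go reference for the unitary form (axpyUnitaryRef is our name, not the vendored fallback):

    // axpyUnitaryRef mirrors AxpyUnitary: y[i] += alpha * x[i], where the
    // product expands as
    //   real = real(alpha)*real(x) - imag(alpha)*imag(x)
    //   imag = imag(alpha)*real(x) + real(alpha)*imag(x)
    func axpyUnitaryRef(alpha complex128, x, y []complex128) {
        n := len(x)
        if len(y) < n {
            n = len(y) // the assembly clamps to min(len(x), len(y)) the same way
        }
        for i := 0; i < n; i++ {
            y[i] += alpha * x[i]
        }
    }

AxpyUnitaryTo is identical except that the sum lands in dst, clamped additionally by len(dst).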
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3 +#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5 +#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7 +#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9 + +#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2 +#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4 +#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6 +#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) +TEXT ·DotcInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128) + SHLQ $4, INC_X + MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128) + SHLQ $4, INC_Y + LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX + SHLQ $4, INC_X // INC_X *= sizeof(complex128) + MOVQ incY+64(FP), INC_Y // INC_Y = incY + SHLQ $4, INC_Y // INC_Y *= sizeof(complex128) + MOVSD $(-1.0), NEG1 + SHUFPD $0, NEG1, NEG1 // { -1, -1 } + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dot_tail // if n <= 4 { goto dot_tail } + MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128) + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128) + +dot_loop: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_XPTR_INCX__X5 + MOVDDUP_XPTR_INCX_2__X7 + MOVDDUP_XPTR_INCx3X__X9 + + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_8_XPTR_INCX__X4 + MOVDDUP_8_XPTR_INCX_2__X6 + MOVDDUP_8_XPTR_INCx3X__X8 + + // X_i = { -imag(x[i]), -imag(x[i]) } + MULPD NEG1, X2 + MULPD P_NEG1, X4 + MULPD NEG1, X6 + MULPD P_NEG1, X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR), X10 + MOVUPS (Y_PTR)(INC_Y*1), X11 + MOVUPS (Y_PTR)(INC_Y*2), X12 + MOVUPS (Y_PTR)(INCx3_Y*1), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) 
+ // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // sum += result[i] + ADDQ INC_X, X_PTR // X_PTR += incX + ADDQ INC_Y, Y_PTR // Y_PTR += incY + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVUPS SUM, sum+88(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s new file mode 100644 index 00000000..1db7e156 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s @@ -0,0 +1,143 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
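DotcInc above and DotcUnitary below are the conjugated dot products in the BLAS zdotc style: the MULPD NEG1 step negates imag(x[i]), so each accumulated term is conj(x[i]) * y[i]. A pure-Go sketch of the strided form, assuming only the signature shown in this diff (dotcIncRef is our name; it needs "math/cmplx"):

    // dotcIncRef mirrors DotcInc: the sum of conj(x[k]) * y[k] over n
    // strided elements starting at ix and iy.
    func dotcIncRef(x, y []complex128, n, incX, incY, ix, iy uintptr) complex128 {
        var sum complex128
        for i := 0; i < int(n); i++ {
            sum += cmplx.Conj(x[ix]) * y[iy]
            ix += incX
            iy += incY
        }
        return sum
    }

DotcUnitary is the inc-1 case over min(len(x), len(y)) elements.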
+
+//+build !noasm,!appengine,!safe
+
+#include "textflag.h"
+
+#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3
+#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5
+#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7
+#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9
+
+#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2
+#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4
+#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6
+#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8
+
+#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
+#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
+#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
+#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
+
+#define X_PTR SI
+#define Y_PTR DI
+#define LEN CX
+#define TAIL BX
+#define SUM X0
+#define P_SUM X1
+#define IDX AX
+#define I_IDX DX
+#define NEG1 X15
+#define P_NEG1 X14
+
+// func DotcUnitary(x, y []complex128) (sum complex128)
+TEXT ·DotcUnitary(SB), NOSPLIT, $0
+	MOVQ x_base+0(FP), X_PTR // X_PTR = &x
+	MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
+	MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
+	CMPQ y_len+32(FP), LEN
+	CMOVQLE y_len+32(FP), LEN
+	PXOR SUM, SUM // sum = 0
+	CMPQ LEN, $0 // if LEN == 0 { return }
+	JE dot_end
+	XORPS P_SUM, P_SUM // psum = 0
+	MOVSD $(-1.0), NEG1
+	SHUFPD $0, NEG1, NEG1 // { -1, -1 }
+	XORQ IDX, IDX // i := 0
+	MOVQ $1, I_IDX // j := 1
+	MOVQ LEN, TAIL
+	ANDQ $3, TAIL // TAIL = LEN % 4
+	SHRQ $2, LEN // LEN = floor( LEN / 4 )
+	JZ dot_tail // if LEN == 0 { goto dot_tail }
+
+	MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining
+
+dot_loop: // do {
+	MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
+	MOVDDUP_16_XPTR_IDX_8__X5
+	MOVDDUP_32_XPTR_IDX_8__X7
+	MOVDDUP_48_XPTR_IDX_8__X9
+
+	MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) }
+	MOVDDUP_16_XPTR_IIDX_8__X4
+	MOVDDUP_32_XPTR_IIDX_8__X6
+	MOVDDUP_48_XPTR_IIDX_8__X8
+
+	// X_i = { -imag(x[i]), -imag(x[i]) }
+	MULPD NEG1, X2
+	MULPD P_NEG1, X4
+	MULPD NEG1, X6
+	MULPD P_NEG1, X8
+
+	// X_j = { imag(y[i]), real(y[i]) }
+	MOVUPS (Y_PTR)(IDX*8), X10
+	MOVUPS 16(Y_PTR)(IDX*8), X11
+	MOVUPS 32(Y_PTR)(IDX*8), X12
+	MOVUPS 48(Y_PTR)(IDX*8), X13
+
+	// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
+	MULPD X10, X3
+	MULPD X11, X5
+	MULPD X12, X7
+	MULPD X13, X9
+
+	// X_j = { real(y[i]), imag(y[i]) }
+	SHUFPD $0x1, X10, X10
+	SHUFPD $0x1, X11, X11
+	SHUFPD $0x1, X12, X12
+	SHUFPD $0x1, X13, X13
+
+	// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
+	MULPD X10, X2
+	MULPD X11, X4
+	MULPD X12, X6
+	MULPD X13, X8
+
+	// X_(i+1) = {
+	// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
+	// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
+	// }
+	ADDSUBPD_X2_X3
+	ADDSUBPD_X4_X5
+	ADDSUBPD_X6_X7
+	ADDSUBPD_X8_X9
+
+	// psum += result[i]
+	ADDPD X3, SUM
+	ADDPD X5, P_SUM
+	ADDPD X7, SUM
+	ADDPD X9, P_SUM
+
+	ADDQ $8, IDX // IDX += 8
+	ADDQ $8, I_IDX // I_IDX += 8
+	DECQ LEN
+	JNZ dot_loop // } while --LEN > 0
+	ADDPD P_SUM, SUM // sum += psum
+	CMPQ TAIL, $0 // if TAIL == 0 { return }
+	JE dot_end
+
+dot_tail: // do {
+	MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , real(x[i]) }
+	MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) }
+	MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) }
+	MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) }
+	MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
+	SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) }
+	MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
+
+	// X_(i+1) = {
+	// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
+	// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
+	// }
+	ADDSUBPD_X2_X3
+	ADDPD X3, SUM // SUM += result[i]
+	ADDQ $2, IDX // IDX += 2
+	ADDQ $2, I_IDX // I_IDX += 2
+	DECQ TAIL
+	JNZ dot_tail // } while --TAIL > 0
+
+dot_end:
+	MOVUPS SUM, sum+48(FP)
+	RET
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s
new file mode 100644
index 00000000..386467fc
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s
@@ -0,0 +1,141 @@
+// Copyright ©2016 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build !noasm,!appengine,!safe
+
+#include "textflag.h"
+
+#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3
+#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5
+#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7
+#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9
+
+#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2
+#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4
+#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6
+#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8
+
+#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
+#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
+#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
+#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
+
+#define X_PTR SI
+#define Y_PTR DI
+#define LEN CX
+#define TAIL BX
+#define SUM X0
+#define P_SUM X1
+#define INC_X R8
+#define INCx3_X R9
+#define INC_Y R10
+#define INCx3_Y R11
+
+// func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128)
+TEXT ·DotuInc(SB), NOSPLIT, $0
+	MOVQ x_base+0(FP), X_PTR // X_PTR = &x
+	MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
+	MOVQ n+48(FP), LEN // LEN = n
+	PXOR SUM, SUM // sum = 0
+	CMPQ LEN, $0 // if LEN == 0 { return }
+	JE dot_end
+	MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128)
+	SHLQ $4, INC_X
+	MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128)
+	SHLQ $4, INC_Y
+	LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix])
+	LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy])
+	MOVQ incX+56(FP), INC_X // INC_X = incX
+	SHLQ $4, INC_X // INC_X *= sizeof(complex128)
+	MOVQ incY+64(FP), INC_Y // INC_Y = incY
+	SHLQ $4, INC_Y // INC_Y *= sizeof(complex128)
+	MOVQ LEN, TAIL
+	ANDQ $3, TAIL // TAIL = LEN % 4
+	SHRQ $2, LEN // LEN = floor( LEN / 4 )
+	JZ dot_tail // if LEN == 0 { goto dot_tail }
+	PXOR P_SUM, P_SUM // psum = 0
+	LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128)
+	LEAQ (INC_Y)(INC_Y*2), INCx3_Y //
INCx3_Y = 3 * incY * sizeof(complex128) + +dot_loop: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_XPTR_INCX__X5 + MOVDDUP_XPTR_INCX_2__X7 + MOVDDUP_XPTR_INCx3X__X9 + + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_8_XPTR_INCX__X4 + MOVDDUP_8_XPTR_INCX_2__X6 + MOVDDUP_8_XPTR_INCx3X__X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR), X10 + MOVUPS (Y_PTR)(INC_Y*1), X11 + MOVUPS (Y_PTR)(INC_Y*2), X12 + MOVUPS (Y_PTR)(INCx3_Y*1), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + + DECQ LEN + JNZ dot_loop // } while --BX > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // sum += result[i] + ADDQ INC_X, X_PTR // X_PTR += incX + ADDQ INC_Y, Y_PTR // Y_PTR += incY + DECQ TAIL // --TAIL + JNZ dot_tail // } while TAIL > 0 + +dot_end: + MOVUPS SUM, sum+88(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s new file mode 100644 index 00000000..d0d507cd --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s @@ -0,0 +1,130 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
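DotuInc above and DotuUnitary below are the unconjugated dot products (BLAS zdotu); the only difference from the dotc kernels is the absence of the NEG1 multiply. A pure-Go reference sketch (dotuUnitaryRef is our name for illustration):

    // dotuUnitaryRef mirrors DotuUnitary: the plain sum of x[i] * y[i]
    // over min(len(x), len(y)) elements, with no conjugation.
    func dotuUnitaryRef(x, y []complex128) complex128 {
        n := len(x)
        if len(y) < n {
            n = len(y)
        }
        var sum complex128
        for i := 0; i < n; i++ {
            sum += x[i] * y[i]
        }
        return sum
    }

DotuInc is the same accumulation over n strided elements starting at ix and iy.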
+
+//+build !noasm,!appengine,!safe
+
+#include "textflag.h"
+
+#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3
+#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5
+#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7
+#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9
+
+#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2
+#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4
+#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6
+#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8
+
+#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3
+#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5
+#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7
+#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9
+
+#define X_PTR SI
+#define Y_PTR DI
+#define LEN CX
+#define TAIL BX
+#define SUM X0
+#define P_SUM X1
+#define IDX AX
+#define I_IDX DX
+
+// func DotuUnitary(x, y []complex128) (sum complex128)
+TEXT ·DotuUnitary(SB), NOSPLIT, $0
+	MOVQ x_base+0(FP), X_PTR // X_PTR = &x
+	MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y
+	MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) )
+	CMPQ y_len+32(FP), LEN
+	CMOVQLE y_len+32(FP), LEN
+	PXOR SUM, SUM // SUM = 0
+	CMPQ LEN, $0 // if LEN == 0 { return }
+	JE dot_end
+	PXOR P_SUM, P_SUM // P_SUM = 0
+	XORQ IDX, IDX // IDX = 0
+	MOVQ $1, DX // j = 1
+	MOVQ LEN, TAIL
+	ANDQ $3, TAIL // TAIL = LEN % 4
+	SHRQ $2, LEN // LEN = floor( LEN / 4 )
+	JZ dot_tail // if LEN == 0 { goto dot_tail }
+
+dot_loop: // do {
+	MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]), real(x[i]) }
+	MOVDDUP_16_XPTR_IDX_8__X5
+	MOVDDUP_32_XPTR_IDX_8__X7
+	MOVDDUP_48_XPTR_IDX_8__X9
+
+	MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) }
+	MOVDDUP_16_XPTR_IIDX_8__X4
+	MOVDDUP_32_XPTR_IIDX_8__X6
+	MOVDDUP_48_XPTR_IIDX_8__X8
+
+	// X_j = { imag(y[i]), real(y[i]) }
+	MOVUPS (Y_PTR)(IDX*8), X10
+	MOVUPS 16(Y_PTR)(IDX*8), X11
+	MOVUPS 32(Y_PTR)(IDX*8), X12
+	MOVUPS 48(Y_PTR)(IDX*8), X13
+
+	// X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
+	MULPD X10, X3
+	MULPD X11, X5
+	MULPD X12, X7
+	MULPD X13, X9
+
+	// X_j = { real(y[i]), imag(y[i]) }
+	SHUFPD $0x1, X10, X10
+	SHUFPD $0x1, X11, X11
+	SHUFPD $0x1, X12, X12
+	SHUFPD $0x1, X13, X13
+
+	// X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
+	MULPD X10, X2
+	MULPD X11, X4
+	MULPD X12, X6
+	MULPD X13, X8
+
+	// X_(i+1) = {
+	// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
+	// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i])
+	// }
+	ADDSUBPD_X2_X3
+	ADDSUBPD_X4_X5
+	ADDSUBPD_X6_X7
+	ADDSUBPD_X8_X9
+
+	// psum += result[i]
+	ADDPD X3, SUM
+	ADDPD X5, P_SUM
+	ADDPD X7, SUM
+	ADDPD X9, P_SUM
+
+	ADDQ $8, IDX // IDX += 8
+	ADDQ $8, I_IDX // I_IDX += 8
+	DECQ LEN
+	JNZ dot_loop // } while --LEN > 0
+	ADDPD P_SUM, SUM // SUM += P_SUM
+	CMPQ TAIL, $0 // if TAIL == 0 { return }
+	JE dot_end
+
+dot_tail: // do {
+	MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , real(x[i]) }
+	MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) }
+	MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) }
+	MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) }
+	SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) }
+	MULPD
X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // psum += result[i] + ADDQ $2, IDX // IDX += 2 + ADDQ $2, I_IDX // I_IDX += 2 + DECQ TAIL // --TAIL + JNZ dot_tail // } while TAIL > 0 + +dot_end: + MOVUPS SUM, sum+48(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s new file mode 100644 index 00000000..40d5851a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s @@ -0,0 +1,69 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define TAIL BX +#define INC R9 +#define INC3 R10 +#define ALPHA X0 +#define ALPHA_2 X1 + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 + +// func DscalInc(alpha float64, x []complex128, n, inc uintptr) +TEXT ·DscalInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SRC // SRC = &x + MOVQ n+32(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE dscal_end + + MOVDDUP_ALPHA // ALPHA = alpha + MOVQ inc+40(FP), INC // INC = inc + SHLQ $4, INC // INC = INC * sizeof(complex128) + LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC + MOVUPS ALPHA, ALPHA_2 // Copy ALPHA and ALPHA_2 for pipelining + MOVQ LEN, TAIL // TAIL = LEN + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dscal_tail // if LEN == 0 { goto dscal_tail } + +dscal_loop: // do { + MOVUPS (SRC), X2 // X_i = x[i] + MOVUPS (SRC)(INC*1), X3 + MOVUPS (SRC)(INC*2), X4 + MOVUPS (SRC)(INC3*1), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST) // x[i] = X_i + MOVUPS X3, (DST)(INC*1) + MOVUPS X4, (DST)(INC*2) + MOVUPS X5, (DST)(INC3*1) + + LEAQ (SRC)(INC*4), SRC // SRC += INC*4 + DECQ LEN + JNZ dscal_loop // } while --LEN > 0 + +dscal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JE dscal_end // if TAIL == 0 { return } + +dscal_tail_loop: // do { + MOVUPS (SRC), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST) // x[i] = X_i + ADDQ INC, SRC // SRC += INC + DECQ TAIL + JNZ dscal_tail_loop // } while --TAIL > 0 + +dscal_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s new file mode 100644 index 00000000..cbc0768a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s @@ -0,0 +1,66 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
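DscalInc above and DscalUnitary below scale a complex vector by a real alpha; because alpha is duplicated across both SSE lanes by MOVDDUP, a single MULPD scales the real and imaginary halves together. Pure-Go sketches of both (the names are ours):

    // dscalUnitaryRef mirrors DscalUnitary: scale each element of x by a
    // real alpha, i.e. x[i] = complex(real(x[i])*alpha, imag(x[i])*alpha).
    func dscalUnitaryRef(alpha float64, x []complex128) {
        for i, v := range x {
            x[i] = complex(real(v)*alpha, imag(v)*alpha)
        }
    }

    // dscalIncRef mirrors DscalInc: the same update over n strided elements.
    func dscalIncRef(alpha float64, x []complex128, n, inc uintptr) {
        var ix uintptr
        for i := 0; i < int(n); i++ {
            v := x[ix]
            x[ix] = complex(real(v)*alpha, imag(v)*alpha)
            ix += inc
        }
    }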
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define IDX AX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 + +// func DscalUnitary(alpha float64, x []complex128) +TEXT ·DscalUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SRC // SRC = &x + MOVQ x_len+16(FP), LEN // LEN = len(x) + CMPQ LEN, $0 // if LEN == 0 { return } + JE dscal_end + + MOVDDUP_ALPHA // ALPHA = alpha + XORQ IDX, IDX // IDX = 0 + MOVUPS ALPHA, ALPHA_2 // Copy ALPHA to ALPHA_2 for pipelining + MOVQ LEN, TAIL // TAIL = LEN + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dscal_tail // if LEN == 0 { goto dscal_tail } + +dscal_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(SRC)(IDX*8), X3 + MOVUPS 32(SRC)(IDX*8), X4 + MOVUPS 48(SRC)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST)(IDX*8) // x[i] = X_i + MOVUPS X3, 16(DST)(IDX*8) + MOVUPS X4, 32(DST)(IDX*8) + MOVUPS X5, 48(DST)(IDX*8) + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dscal_loop // } while --LEN > 0 + +dscal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ dscal_end // if TAIL == 0 { return } + +dscal_tail_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST)(IDX*8) // x[i] = X_i + ADDQ $2, IDX // IDX += 2 + DECQ TAIL + JNZ dscal_tail_loop // } while --TAIL > 0 + +dscal_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go new file mode 100644 index 00000000..47a80e50 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go @@ -0,0 +1,31 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c128 + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []complex128, alpha complex128, x []complex128) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []complex128, incDst uintptr, alpha complex128, x []complex128, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s new file mode 100644 index 00000000..7b807b3a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s @@ -0,0 +1,116 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
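scal.go above already documents ScalUnitaryTo and ScalIncTo as plain Go loops; ScalUnitary below and ScalInc (the final file in this hunk) are the in-place forms with a fully complex alpha, so the assembly reuses the Axpy multiply pattern minus the final ADDPD against y. In the same reference style (names ours):

    // scalUnitaryRef mirrors ScalUnitary: x[i] *= alpha, in place.
    func scalUnitaryRef(alpha complex128, x []complex128) {
        for i := range x {
            x[i] *= alpha
        }
    }

    // scalIncRef mirrors ScalInc: the same update over n strided elements.
    func scalIncRef(alpha complex128, x []complex128, n, inc uintptr) {
        var ix uintptr
        for i := 0; i < int(n); i++ {
            x[ix] *= alpha
            ix += inc
        }
    }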
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define IDX AX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_C X1 +#define ALPHA2 X10 +#define ALPHA_C2 X11 + +#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 +#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 +#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 +#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +// func ScalUnitary(alpha complex128, x []complex128) +TEXT ·ScalUnitary(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SRC // SRC = &x + MOVQ x_len+24(FP), LEN // LEN = len(x) + CMPQ LEN, $0 // if LEN == 0 { return } + JE scal_end + + MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } + MOVAPS ALPHA, ALPHA_C + SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } + + XORQ IDX, IDX // IDX = 0 + MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining + MOVAPS ALPHA_C, ALPHA_C2 + MOVQ LEN, TAIL + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ scal_tail // if BX == 0 { goto scal_tail } + +scal_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SRC)(IDX*8), X4 + MOVUPS 32(SRC)(IDX*8), X6 + MOVUPS 48(SRC)(IDX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + MULPD ALPHA_C, X2 + MULPD ALPHA, X3 + MULPD ALPHA_C2, X4 + MULPD ALPHA2, X5 + MULPD ALPHA_C, X6 + MULPD ALPHA, X7 + MULPD ALPHA_C2, X8 + MULPD ALPHA2, X9 + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) + MOVUPS X5, 16(DST)(IDX*8) + MOVUPS X7, 32(DST)(IDX*8) + MOVUPS X9, 48(DST)(IDX*8) + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ scal_loop // } while --LEN > 0 + +scal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ scal_end // if TAIL == 0 { return } + +scal_tail_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) + ADDQ $2, IDX // IDX += 2 + DECQ TAIL + JNZ scal_tail_loop // } while --LEN > 0 + +scal_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s new file mode 100644 index 00000000..7857c155 --- /dev/null +++ 
b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s @@ -0,0 +1,121 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define TAIL BX +#define INC R9 +#define INC3 R10 +#define ALPHA X0 +#define ALPHA_C X1 +#define ALPHA2 X10 +#define ALPHA_C2 X11 + +#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 +#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 +#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 +#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +// func ScalInc(alpha complex128, x []complex128, n, inc uintptr) +TEXT ·ScalInc(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SRC // SRC = &x + MOVQ n+40(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE scal_end // if LEN == 0 { return } + + MOVQ inc+48(FP), INC // INC = inc + SHLQ $4, INC // INC = INC * sizeof(complex128) + LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC + + MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } + MOVAPS ALPHA, ALPHA_C + SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } + + MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining + MOVAPS ALPHA_C, ALPHA_C2 + MOVQ LEN, TAIL + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ scal_tail // if BX == 0 { goto scal_tail } + +scal_loop: // do { + MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SRC)(INC*1), X4 + MOVUPS (SRC)(INC*2), X6 + MOVUPS (SRC)(INC3*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + MULPD ALPHA_C, X2 + MULPD ALPHA, X3 + MULPD ALPHA_C2, X4 + MULPD ALPHA2, X5 + MULPD ALPHA_C, X6 + MULPD ALPHA, X7 + MULPD ALPHA_C2, X8 + MULPD ALPHA2, X9 + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + MOVUPS X3, (DST) // x[i] = X_(i+1) + MOVUPS X5, (DST)(INC*1) + MOVUPS X7, (DST)(INC*2) + MOVUPS X9, (DST)(INC3*1) + + LEAQ (SRC)(INC*4), SRC // SRC = &(SRC[inc*4]) + DECQ LEN + JNZ scal_loop // } while --BX > 0 + +scal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JE scal_end // if TAIL == 0 { return } + +scal_tail_loop: // do { + MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + MOVUPS X3, (DST) // x[i] = X_i + 
ADDQ INC, SRC // SRC = &(SRC[incX]) + DECQ TAIL + JNZ scal_tail_loop // } while --TAIL > 0 + +scal_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go new file mode 100644 index 00000000..ad6b23ca --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go @@ -0,0 +1,96 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package c128 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex128, x, y []complex128) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) + +// DscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func DscalUnitary(alpha float64, x []complex128) + +// DscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func DscalInc(alpha float64, x []complex128, n, inc uintptr) + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex128, x []complex128, n, inc uintptr) + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex128, x []complex128) + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum +func DotcUnitary(x, y []complex128) (sum complex128) + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex128) (sum complex128) + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go new file mode 100644 index 00000000..6313e571 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go @@ -0,0 +1,163 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
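In the declarations above, ix and iy are explicit start offsets rather than being derived inside the kernels; this lets callers honor the standard BLAS negative-stride convention, where a vector with inc < 0 is traversed from its far end. A sketch of the usual derivation (assumed from the BLAS rule; the same computation appears in Ger's pure-Go fallback later in this diff):

    // startIndex returns the first element offset a BLAS-style caller
    // would pass as ix/iy: (1-n)*inc for negative strides, 0 otherwise.
    func startIndex(n, inc int) uintptr {
        if inc < 0 {
            return uintptr((1 - n) * inc) // non-negative: both factors are <= 0
        }
        return 0
    }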
+ +// +build !amd64 noasm appengine safe + +package c128 + +import "math/cmplx" + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex128, x, y []complex128) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func DscalUnitary(alpha float64, x []complex128) { + for i, v := range x { + x[i] = complex(real(v)*alpha, imag(v)*alpha) + } +} + +// DscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func DscalInc(alpha float64, x []complex128, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) + ix += inc + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex128, x []complex128, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += inc + } +} + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex128, x []complex128) { + for i := range x { + x[i] *= alpha + } +} + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum +func DotcUnitary(x, y []complex128) (sum complex128) { + for i, v := range x { + sum += y[i] * cmplx.Conj(v) + } + return sum +} + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { + for i := 0; i < int(n); i++ { + sum += y[iy] * cmplx.Conj(x[ix]) + ix += incX + iy += incY + } + return sum +} + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex128) (sum complex128) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s new file mode 100644 index 00000000..2d167c08 --- /dev/null +++ 
b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s @@ -0,0 +1,73 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ n+56(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JLE axpyi_end + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ ix+80(FP), R8 // R8 = ix + MOVQ iy+88(FP), R9 // R9 = iy + LEAQ (SI)(R8*4), SI // SI = &(x[ix]) + LEAQ (DI)(R9*4), DI // DI = &(y[iy]) + MOVQ DI, DX // DX = DI Read Pointer for y + MOVQ incX+64(FP), R8 // R8 = incX + SHLQ $2, R8 // R8 *= sizeof(float32) + MOVQ incY+72(FP), R9 // R9 = incY + SHLQ $2, R9 // R9 *= sizeof(float32) + MOVSS alpha+0(FP), X0 // X0 = alpha + MOVSS X0, X1 // X1 = X0 // for pipelining + MOVQ CX, BX + ANDQ $3, BX // BX = n % 4 + SHRQ $2, CX // CX = floor( n / 4 ) + JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } + +axpyi_loop: // Loop unrolled 4x do { + MOVSS (SI), X2 // X_i = x[i] + MOVSS (SI)(R8*1), X3 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSS (SI), X4 + MOVSS (SI)(R8*1), X5 + MULSS X1, X2 // X_i *= a + MULSS X0, X3 + MULSS X1, X4 + MULSS X0, X5 + ADDSS (DX), X2 // X_i += y[i] + ADDSS (DX)(R9*1), X3 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDSS (DX), X4 + ADDSS (DX)(R9*1), X5 + MOVSS X2, (DI) // y[i] = X_i + MOVSS X3, (DI)(R9*1) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + MOVSS X4, (DI) + MOVSS X5, (DI)(R9*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + LOOP axpyi_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpyi_end + +axpyi_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +axpyi_tail: // do { + MOVSS (SI), X2 // X2 = x[i] + MULSS X1, X2 // X2 *= a + ADDSS (DI), X2 // X2 += y[i] + MOVSS X2, (DI) // y[i] = X2 + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET + diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s new file mode 100644 index 00000000..b79f9926 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s @@ -0,0 +1,78 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
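The AxpyInc register choreography above (SI/DI walking x and y, with DX as a separate read pointer into y) implements the documented loop y[iy] += alpha * x[ix]. A concrete usage sketch of those semantics (values illustrative; internal package, so pseudocode in practice):

    import f32 "gonum.org/v1/gonum/internal/asm/f32" // vendored path; illustration only

    func demoAxpyInc() []float32 {
        x := []float32{1, 2, 3, 4}
        y := []float32{10, 20, 30, 40}
        f32.AxpyInc(2, x, y, 2, 2, 2, 0, 0) // n=2, incX=incY=2, ix=iy=0
        return y                            // []float32{12, 20, 36, 40}
    }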
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ n+96(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JLE axpyi_end + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+48(FP), SI // SI = &x + MOVQ y_base+72(FP), DX // DX = &y + MOVQ ix+120(FP), R8 // R8 = ix // Load the first index + MOVQ iy+128(FP), R9 // R9 = iy + MOVQ idst+32(FP), R10 // R10 = idst + LEAQ (SI)(R8*4), SI // SI = &(x[ix]) + LEAQ (DX)(R9*4), DX // DX = &(y[iy]) + LEAQ (DI)(R10*4), DI // DI = &(dst[idst]) + MOVQ incX+104(FP), R8 // R8 = incX + SHLQ $2, R8 // R8 *= sizeof(float32) + MOVQ incY+112(FP), R9 // R9 = incY + SHLQ $2, R9 // R9 *= sizeof(float32) + MOVQ incDst+24(FP), R10 // R10 = incDst + SHLQ $2, R10 // R10 *= sizeof(float32) + MOVSS alpha+40(FP), X0 // X0 = alpha + MOVSS X0, X1 // X1 = X0 // for pipelining + MOVQ CX, BX + ANDQ $3, BX // BX = n % 4 + SHRQ $2, CX // CX = floor( n / 4 ) + JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } + +axpyi_loop: // Loop unrolled 4x do { + MOVSS (SI), X2 // X_i = x[i] + MOVSS (SI)(R8*1), X3 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSS (SI), X4 + MOVSS (SI)(R8*1), X5 + MULSS X1, X2 // X_i *= a + MULSS X0, X3 + MULSS X1, X4 + MULSS X0, X5 + ADDSS (DX), X2 // X_i += y[i] + ADDSS (DX)(R9*1), X3 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDSS (DX), X4 + ADDSS (DX)(R9*1), X5 + MOVSS X2, (DI) // dst[i] = X_i + MOVSS X3, (DI)(R10*1) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + MOVSS X4, (DI) + MOVSS X5, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + LOOP axpyi_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpyi_end + +axpyi_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +axpyi_tail: // do { + MOVSS (SI), X2 // X2 = x[i] + MULSS X1, X2 // X2 *= a + ADDSS (DX), X2 // X2 += y[i] + MOVSS X2, (DI) // dst[i] = X2 + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DX // DX = &(DX[incY]) + ADDQ R10, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET + diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s new file mode 100644 index 00000000..97df90a0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s @@ -0,0 +1,97 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyUnitary(alpha float32, x, y []float32) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ x_len+16(FP), BX // BX = min( len(x), len(y) ) + CMPQ y_len+40(FP), BX + CMOVQLE y_len+40(FP), BX + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + MOVSS alpha+0(FP), X0 + SHUFPS $0, X0, X0 // X0 = { a, a, a, a } + XORQ AX, AX // i = 0 + PXOR X2, X2 // 2 NOP instructions (PXOR) to align + PXOR X3, X3 // loop to cache line + MOVQ DI, CX + ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS + JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } + + XORQ $0xF, CX // CX = 4 - floor( BX % 16 / 4 ) + INCQ CX + SHRQ $2, CX + +axpy_align: // Trim first value(s) in unaligned buffer do { + MOVSS (SI)(AX*4), X2 // X2 = x[i] + MULSS X0, X2 // X2 *= a + ADDSS (DI)(AX*4), X2 // X2 += y[i] + MOVSS X2, (DI)(AX*4) // y[i] = X2 + INCQ AX // i++ + DECQ BX + JZ axpy_end // if --BX == 0 { return } + LOOP axpy_align // } while --CX > 0 + +axpy_no_trim: + MOVUPS X0, X1 // Copy X0 to X1 for pipelining + MOVQ BX, CX + ANDQ $0xF, BX // BX = len % 16 + SHRQ $4, CX // CX = int( len / 16 ) + JZ axpy_tail4_start // if CX == 0 { return } + +axpy_loop: // Loop unrolled 16x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] + MOVUPS 16(SI)(AX*4), X3 + MOVUPS 32(SI)(AX*4), X4 + MOVUPS 48(SI)(AX*4), X5 + MULPS X0, X2 // X2 *= a + MULPS X1, X3 + MULPS X0, X4 + MULPS X1, X5 + ADDPS (DI)(AX*4), X2 // X2 += y[i:i+4] + ADDPS 16(DI)(AX*4), X3 + ADDPS 32(DI)(AX*4), X4 + ADDPS 48(DI)(AX*4), X5 + MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 + MOVUPS X3, 16(DI)(AX*4) + MOVUPS X4, 32(DI)(AX*4) + MOVUPS X5, 48(DI)(AX*4) + ADDQ $16, AX // i += 16 + LOOP axpy_loop // while (--CX) > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + +axpy_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ BX, CX // CX = floor( BX / 4 ) + SHRQ $2, CX + JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start } + +axpy_tail4: // Loop unrolled 4x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i] + MULPS X0, X2 // X2 *= a + ADDPS (DI)(AX*4), X2 // X2 += y[i] + MOVUPS X2, (DI)(AX*4) // y[i] = X2 + ADDQ $4, AX // i += 4 + LOOP axpy_tail4 // } while --CX > 0 + +axpy_tail_start: // Reset loop counter for 1-wide tail loop + MOVQ BX, CX // CX = BX % 4 + ANDQ $3, CX + JZ axpy_end // if CX == 0 { return } + +axpy_tail: + MOVSS (SI)(AX*4), X1 // X1 = x[i] + MULSS X0, X1 // X1 *= a + ADDSS (DI)(AX*4), X1 // X1 += y[i] + MOVSS X1, (DI)(AX*4) // y[i] = X1 + INCQ AX // i++ + LOOP axpy_tail // } while --CX > 0 + +axpy_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s new file mode 100644 index 00000000..a826ca31 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s @@ -0,0 +1,98 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
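The AxpyUnitary prologue above runs scalar iterations until the y pointer is 16-byte aligned; the ANDQ/XORQ/INCQ/SHRQ sequence is a branch-light way to count how many float32s that takes. The same arithmetic in Go (exposition only; AxpyUnitaryTo below reuses the pattern):

    // trimCount mirrors the asm: CX = addr & 0xF; CX = ((CX ^ 0xF) + 1) >> 2,
    // which equals (16 - addr%16) / 4, the float32 count to the next boundary.
    func trimCount(addr uintptr) uintptr {
        cx := addr & 0xF
        if cx == 0 {
            return 0 // already aligned; the asm jumps straight to axpy_no_trim
        }
        return ((cx ^ 0xF) + 1) >> 2
    }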
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+32(FP), SI // SI = &x + MOVQ y_base+56(FP), DX // DX = &y + MOVQ x_len+40(FP), BX // BX = min( len(x), len(y), len(dst) ) + CMPQ y_len+64(FP), BX + CMOVQLE y_len+64(FP), BX + CMPQ dst_len+8(FP), BX + CMOVQLE dst_len+8(FP), BX + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + MOVSS alpha+24(FP), X0 + SHUFPS $0, X0, X0 // X0 = { a, a, a, a } + XORQ AX, AX // i = 0 + MOVQ DX, CX + ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS + JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } + + XORQ $0xF, CX // CX = 4 - floor( CX / 4 ), float32s until DX is 16-byte aligned + INCQ CX + SHRQ $2, CX + +axpy_align: // Trim first value(s) in unaligned buffer do { + MOVSS (SI)(AX*4), X2 // X2 = x[i] + MULSS X0, X2 // X2 *= a + ADDSS (DX)(AX*4), X2 // X2 += y[i] + MOVSS X2, (DI)(AX*4) // dst[i] = X2 + INCQ AX // i++ + DECQ BX + JZ axpy_end // if --BX == 0 { return } + LOOP axpy_align // } while --CX > 0 + +axpy_no_trim: + MOVUPS X0, X1 // Copy X0 to X1 for pipelining + MOVQ BX, CX + ANDQ $0xF, BX // BX = len % 16 + SHRQ $4, CX // CX = floor( len / 16 ) + JZ axpy_tail4_start // if CX == 0 { goto axpy_tail4_start } + +axpy_loop: // Loop unrolled 16x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] + MOVUPS 16(SI)(AX*4), X3 + MOVUPS 32(SI)(AX*4), X4 + MOVUPS 48(SI)(AX*4), X5 + MULPS X0, X2 // X2 *= a + MULPS X1, X3 + MULPS X0, X4 + MULPS X1, X5 + ADDPS (DX)(AX*4), X2 // X2 += y[i:i+4] + ADDPS 16(DX)(AX*4), X3 + ADDPS 32(DX)(AX*4), X4 + ADDPS 48(DX)(AX*4), X5 + MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 + MOVUPS X3, 16(DI)(AX*4) + MOVUPS X4, 32(DI)(AX*4) + MOVUPS X5, 48(DI)(AX*4) + ADDQ $16, AX // i += 16 + LOOP axpy_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + +axpy_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ BX, CX // CX = floor( BX / 4 ) + SHRQ $2, CX + JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start } + +axpy_tail4: // Loop unrolled 4x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] + MULPS X0, X2 // X2 *= a + ADDPS (DX)(AX*4), X2 // X2 += y[i:i+4] + MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 + ADDQ $4, AX // i += 4 + LOOP axpy_tail4 // } while --CX > 0 + +axpy_tail_start: // Reset loop counter for 1-wide tail loop + MOVQ BX, CX // CX = BX % 4 + ANDQ $3, CX + JZ axpy_end // if CX == 0 { return } + +axpy_tail: + MOVSS (SI)(AX*4), X1 // X1 = x[i] + MULSS X0, X1 // X1 *= a + ADDSS (DX)(AX*4), X1 // X1 += y[i] + MOVSS X1, (DI)(AX*4) // dst[i] = X1 + INCQ AX // i++ + LOOP axpy_tail // } while --CX > 0 + +axpy_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s new file mode 100644 index 00000000..4518e049 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s @@ -0,0 +1,91 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R10 +#define INC_Y R9 +#define INCx3_Y R11 +#define SUM X0 +#define P_SUM X1 + +// func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) +TEXT ·DdotInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 + JE dot_end + + MOVQ ix+72(FP), INC_X // INC_X = ix + MOVQ iy+80(FP), INC_Y // INC_Y = iy + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) + + MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(float32) + SHLQ $2, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(float32) + SHLQ $2, INC_Y + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN == 0 { goto dot_tail } + + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dot_loop: // Loop unrolled 4x do { + CVTSS2SD (X_PTR), X2 // X_i = float64(x[i]) + CVTSS2SD (X_PTR)(INC_X*1), X3 + CVTSS2SD (X_PTR)(INC_X*2), X4 + CVTSS2SD (X_PTR)(INCx3_X*1), X5 + + CVTSS2SD (Y_PTR), X6 // X_j = float64(y[i]) + CVTSS2SD (Y_PTR)(INC_Y*1), X7 + CVTSS2SD (Y_PTR)(INC_Y*2), X8 + CVTSS2SD (Y_PTR)(INCx3_Y*1), X9 + + MULSD X6, X2 // X_i *= X_j + MULSD X7, X3 + MULSD X8, X4 + MULSD X9, X5 + + ADDSD X2, SUM // SUM += X_i + ADDSD X3, P_SUM + ADDSD X4, SUM + ADDSD X5, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDSD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + CVTSS2SD (X_PTR), X2 // X2 = float64(x[i]) + CVTSS2SD (Y_PTR), X3 // X3 = float64(y[i]) + MULSD X3, X2 // X2 *= X3 + ADDSD X2, SUM // SUM += X2 + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVSD SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s new file mode 100644 index 00000000..231cbd3b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s @@ -0,0 +1,110 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
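DdotInc above converts each float32 operand with CVTSS2SD and accumulates in float64; the payoff is that small products are not rounded away against a much larger running sum. A self-contained illustration of the effect (numbers are illustrative, not from a benchmark):

    // accumulationDrift sums 2^20 copies of (1e-4)^2. The float32 sum rounds
    // each tiny increment against the growing total and the error compounds;
    // the float64 sum stays close to the exact 1.048576e-2.
    func accumulationDrift() (float32, float64) {
        var s32 float32
        var s64 float64
        for i := 0; i < 1<<20; i++ {
            v := float32(1e-4)
            s32 += v * v
            s64 += float64(v) * float64(v)
        }
        return s32, s64
    }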
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define HADDPD_SUM_SUM LONG $0xC07C0F66 // @ HADDPD X0, X0 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define IDX AX +#define SUM X0 +#define P_SUM X1 + +// func DdotUnitary(x, y []float32) (sum float32) +TEXT ·DdotUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + PXOR SUM, SUM // psum = 0 + CMPQ LEN, $0 + JE dot_end + + XORQ IDX, IDX + MOVQ Y_PTR, DX + ANDQ $0xF, DX // Align on 16-byte boundary for ADDPS + JZ dot_no_trim // if DX == 0 { goto dot_no_trim } + + SUBQ $16, DX + +dot_align: // Trim first value(s) in unaligned buffer do { + CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) + CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) + MULSD X3, X2 + ADDSD X2, SUM // SUM += X2 + INCQ IDX // IDX++ + DECQ LEN + JZ dot_end // if --TAIL == 0 { return } + ADDQ $4, DX + JNZ dot_align // } while --LEN > 0 + +dot_no_trim: + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + MOVQ LEN, TAIL + ANDQ $0x7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } + +dot_loop: // Loop unrolled 8x do { + CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + CVTPS2PD 8(X_PTR)(IDX*4), X3 + CVTPS2PD 16(X_PTR)(IDX*4), X4 + CVTPS2PD 24(X_PTR)(IDX*4), X5 + + CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] + CVTPS2PD 8(Y_PTR)(IDX*4), X7 + CVTPS2PD 16(Y_PTR)(IDX*4), X8 + CVTPS2PD 24(Y_PTR)(IDX*4), X9 + + MULPD X6, X2 // X_i *= X_j + MULPD X7, X3 + MULPD X8, X4 + MULPD X9, X5 + + ADDPD X2, SUM // SUM += X_i + ADDPD X3, P_SUM + ADDPD X4, SUM + ADDPD X5, P_SUM + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDPD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail_start: + MOVQ TAIL, LEN + SHRQ $1, LEN + JZ dot_tail_one + +dot_tail_two: + CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] + MULPD X6, X2 // X_i *= X_j + ADDPD X2, SUM // SUM += X_i + ADDQ $2, IDX // IDX += 2 + DECQ LEN + JNZ dot_tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ dot_end + +dot_tail_one: + CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) + CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) + MULSD X3, X2 // X2 *= X3 + ADDSD X2, SUM // SUM += X2 + +dot_end: + HADDPD_SUM_SUM // SUM = \sum{ SUM[i] } + MOVSD SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go new file mode 100644 index 00000000..cc541375 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f32 provides float32 vector primitives. +package f32 diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s new file mode 100644 index 00000000..4d36b289 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s @@ -0,0 +1,85 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
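DdotUnitary above finishes with HADDPD emitted as a raw LONG encoding, presumably because the Go assembler of the era did not know the mnemonic (the other LONG/BYTE defines in these files look like the same workaround). The reduction itself is just a horizontal add of the two SUM lanes:

    // hsum is the Go-level meaning of HADDPD X0, X0: { s1, s0 } -> s0 + s1.
    func hsum(lanes [2]float64) float64 { return lanes[0] + lanes[1] }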
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R10 +#define INC_Y R9 +#define INCx3_Y R11 +#define SUM X0 +#define P_SUM X1 + +// func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) +TEXT ·DotInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + MOVQ n+48(FP), LEN // LEN = n + CMPQ LEN, $0 + JE dot_end + + MOVQ ix+72(FP), INC_X // INC_X = ix + MOVQ iy+80(FP), INC_Y // INC_Y = iy + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) + + MOVQ incX+56(FP), INC_X // INC_X := incX * sizeof(float32) + SHLQ $2, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y := incY * sizeof(float32) + SHLQ $2, INC_Y + + MOVQ LEN, TAIL + ANDQ $0x3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN == 0 { goto dot_tail } + + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dot_loop: // Loop unrolled 4x do { + MOVSS (X_PTR), X2 // X_i = x[i] + MOVSS (X_PTR)(INC_X*1), X3 + MOVSS (X_PTR)(INC_X*2), X4 + MOVSS (X_PTR)(INCx3_X*1), X5 + + MULSS (Y_PTR), X2 // X_i *= y[i] + MULSS (Y_PTR)(INC_Y*1), X3 + MULSS (Y_PTR)(INC_Y*2), X4 + MULSS (Y_PTR)(INCx3_Y*1), X5 + + ADDSS X2, SUM // SUM += X_i + ADDSS X3, P_SUM + ADDSS X4, SUM + ADDSS X5, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDSS P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVSS (X_PTR), X2 // X2 = x[i] + MULSS (Y_PTR), X2 // X2 *= y[i] + ADDSS X2, SUM // SUM += X2 + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVSS SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s new file mode 100644 index 00000000..c32ede5a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s @@ -0,0 +1,106 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define HADDPS_SUM_SUM LONG $0xC07C0FF2 // @ HADDPS X0, X0 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define IDX AX +#define SUM X0 +#define P_SUM X1 + +// func DotUnitary(x, y []float32) (sum float32) +TEXT ·DotUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 + JE dot_end + + XORQ IDX, IDX + MOVQ Y_PTR, DX + ANDQ $0xF, DX // Align on 16-byte boundary for MULPS + JZ dot_no_trim // if DX == 0 { goto dot_no_trim } + SUBQ $16, DX + +dot_align: // Trim first value(s) in unaligned buffer do { + MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] + MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] + ADDSS X2, SUM // SUM += X2 + INCQ IDX // IDX++ + DECQ LEN + JZ dot_end // if --TAIL == 0 { return } + ADDQ $4, DX + JNZ dot_align // } while --DX > 0 + +dot_no_trim: + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + MOVQ LEN, TAIL + ANDQ $0xF, TAIL // TAIL = LEN % 16 + SHRQ $4, LEN // LEN = floor( LEN / 16 ) + JZ dot_tail4_start // if LEN == 0 { goto dot_tail4_start } + +dot_loop: // Loop unrolled 16x do { + MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + MOVUPS 16(X_PTR)(IDX*4), X3 + MOVUPS 32(X_PTR)(IDX*4), X4 + MOVUPS 48(X_PTR)(IDX*4), X5 + + MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] + MULPS 16(Y_PTR)(IDX*4), X3 + MULPS 32(Y_PTR)(IDX*4), X4 + MULPS 48(Y_PTR)(IDX*4), X5 + + ADDPS X2, SUM // SUM += X_i + ADDPS X3, P_SUM + ADDPS X4, SUM + ADDPS X5, P_SUM + + ADDQ $16, IDX // IDX += 16 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDPS P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ TAIL, LEN // LEN = floor( TAIL / 4 ) + SHRQ $2, LEN + JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } + +dot_tail4_loop: // Loop unrolled 4x do { + MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] + ADDPS X2, SUM // SUM += X_i + ADDQ $4, IDX // i += 4 + DECQ LEN + JNZ dot_tail4_loop // } while --LEN > 0 + +dot_tail_start: // Reset loop counter for 1-wide tail loop + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ dot_end // if TAIL == 0 { return } + +dot_tail: // do { + MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] + MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] + ADDSS X2, SUM // psum += X2 + INCQ IDX // IDX++ + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + HADDPS_SUM_SUM // SUM = \sum{ SUM[i] } + HADDPS_SUM_SUM + MOVSS SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go new file mode 100644 index 00000000..2b336a2a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go @@ -0,0 +1,15 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f32 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
+func Ger(m, n uintptr, alpha float32, + x []float32, incX uintptr, + y []float32, incY uintptr, + a []float32, lda uintptr) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s new file mode 100644 index 00000000..e5e80c52 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s @@ -0,0 +1,757 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 4 +#define BITSIZE 2 +#define KERNELSIZE 3 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define Y y_base+56(FP) +#define Y_PTR DX +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X0 +#define ALPHA_SPILL al-16(SP) + +#define LOAD_ALPHA \ + MOVSS alpha+16(FP), ALPHA \ + SHUFPS $0, ALPHA, ALPHA + +#define LOAD_SCALED4 \ + PREFETCHNTA 16*SIZE(X_PTR) \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP 2*SIZE(X_PTR), X3 \ + MOVSHDUP X1, X2 \ + MOVSHDUP X3, X4 \ + MOVSLDUP X1, X1 \ + MOVSLDUP X3, X3 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 \ + MULPS ALPHA, X3 \ + MULPS ALPHA, X4 + +#define LOAD_SCALED2 \ + MOVDDUP (X_PTR), X1 \ + MOVSHDUP X1, X2 \ + MOVSLDUP X1, X1 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 + +#define LOAD_SCALED1 \ + MOVSS (X_PTR), X1 \ + SHUFPS $0, X1, X1 \ + MULPS ALPHA, X1 + +#define LOAD_SCALED4_INC \ + PREFETCHNTA (X_PTR)(INC_X*8) \ + MOVSS (X_PTR), X1 \ + MOVSS (X_PTR)(INC_X*1), X2 \ + MOVSS (X_PTR)(INC_X*2), X3 \ + MOVSS (X_PTR)(INC3_X*1), X4 \ + SHUFPS $0, X1, X1 \ + SHUFPS $0, X2, X2 \ + SHUFPS $0, X3, X3 \ + SHUFPS $0, X4, X4 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 \ + MULPS ALPHA, X3 \ + MULPS ALPHA, X4 + +#define LOAD_SCALED2_INC \ + MOVSS (X_PTR), X1 \ + MOVSS (X_PTR)(INC_X*1), X2 \ + SHUFPS $0, X1, X1 \ + SHUFPS $0, X2, X2 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 + +#define KERNEL_LOAD8 \ + MOVUPS (Y_PTR), X5 \ + MOVUPS 4*SIZE(Y_PTR), X6 + +#define KERNEL_LOAD8_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + MOVSS (Y_PTR)(INC_Y*2), X7 \ + MOVSS (Y_PTR)(INC3_Y*1), X8 \ + UNPCKLPS X6, X5 \ + UNPCKLPS X8, X7 \ + MOVLHPS X7, X5 \ + LEAQ (Y_PTR)(INC_Y*4), Y_PTR \ + MOVSS (Y_PTR), X6 \ + MOVSS (Y_PTR)(INC_Y*1), X7 \ + MOVSS (Y_PTR)(INC_Y*2), X8 \ + MOVSS (Y_PTR)(INC3_Y*1), X9 \ + UNPCKLPS X7, X6 \ + UNPCKLPS X9, X8 \ + MOVLHPS X8, X6 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X5 + +#define KERNEL_LOAD4_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + MOVSS (Y_PTR)(INC_Y*2), X7 \ + MOVSS (Y_PTR)(INC3_Y*1), X8 \ + UNPCKLPS X6, X5 \ + UNPCKLPS X8, X7 \ + MOVLHPS X7, X5 + +#define KERNEL_LOAD2 \ + MOVSD (Y_PTR), X5 + +#define KERNEL_LOAD2_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + UNPCKLPS X6, X5 + +#define KERNEL_4x8 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MOVUPS X5, X9 \ + MOVUPS X6, X10 \ + MOVUPS X5, X11 \ + MOVUPS X6, X12 \ + MULPS X1, X5 \ + MULPS X1, X6 \ + MULPS X2, X7 \ + MULPS X2, X8 \ + MULPS X3, X9 \ + MULPS X3, X10 \ + MULPS X4, X11 \ + MULPS X4, X12 + +#define STORE_4x8 \ + MOVUPS ALPHA, ALPHA_SPILL \ + MOVUPS (A_PTR), X13 \ + ADDPS X13, X5 \ + MOVUPS 4*SIZE(A_PTR), X14 \ + ADDPS X14, X6 \ + MOVUPS (A_PTR)(LDA*1), X15 \ + ADDPS 
X15, X7 \ + MOVUPS 4*SIZE(A_PTR)(LDA*1), X0 \ + ADDPS X0, X8 \ + MOVUPS (A_PTR)(LDA*2), X13 \ + ADDPS X13, X9 \ + MOVUPS 4*SIZE(A_PTR)(LDA*2), X14 \ + ADDPS X14, X10 \ + MOVUPS (A_PTR)(LDA3*1), X15 \ + ADDPS X15, X11 \ + MOVUPS 4*SIZE(A_PTR)(LDA3*1), X0 \ + ADDPS X0, X12 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ + MOVUPS X9, (A_PTR)(LDA*2) \ + MOVUPS X10, 4*SIZE(A_PTR)(LDA*2) \ + MOVUPS X11, (A_PTR)(LDA3*1) \ + MOVUPS X12, 4*SIZE(A_PTR)(LDA3*1) \ + MOVUPS ALPHA_SPILL, ALPHA \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_4x4 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPS X1, X5 \ + MULPS X2, X6 \ + MULPS X3, X7 \ + MULPS X4, X8 + +#define STORE_4x4 \ + MOVUPS (A_PTR), X13 \ + ADDPS X13, X5 \ + MOVUPS (A_PTR)(LDA*1), X14 \ + ADDPS X14, X6 \ + MOVUPS (A_PTR)(LDA*2), X15 \ + ADDPS X15, X7 \ + MOVUPS (A_PTR)(LDA3*1), X13 \ + ADDPS X13, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + MOVUPS X7, (A_PTR)(LDA*2) \ + MOVUPS X8, (A_PTR)(LDA3*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPS X1, X5 \ + MULPS X2, X6 \ + MULPS X3, X7 \ + MULPS X4, X8 + +#define STORE_4x2 \ + MOVSD (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVSD (A_PTR)(LDA*1), X10 \ + ADDPS X10, X6 \ + MOVSD (A_PTR)(LDA*2), X11 \ + ADDPS X11, X7 \ + MOVSD (A_PTR)(LDA3*1), X12 \ + ADDPS X12, X8 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + MOVSD X7, (A_PTR)(LDA*2) \ + MOVSD X8, (A_PTR)(LDA3*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVSS (Y_PTR), X5 \ + MOVSS X5, X6 \ + MOVSS X5, X7 \ + MOVSS X5, X8 \ + MULSS X1, X5 \ + MULSS X2, X6 \ + MULSS X3, X7 \ + MULSS X4, X8 + +#define STORE_4x1 \ + ADDSS (A_PTR), X5 \ + ADDSS (A_PTR)(LDA*1), X6 \ + ADDSS (A_PTR)(LDA*2), X7 \ + ADDSS (A_PTR)(LDA3*1), X8 \ + MOVSS X5, (A_PTR) \ + MOVSS X6, (A_PTR)(LDA*1) \ + MOVSS X7, (A_PTR)(LDA*2) \ + MOVSS X8, (A_PTR)(LDA3*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_2x8 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MULPS X1, X5 \ + MULPS X1, X6 \ + MULPS X2, X7 \ + MULPS X2, X8 + +#define STORE_2x8 \ + MOVUPS (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVUPS 4*SIZE(A_PTR), X10 \ + ADDPS X10, X6 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPS X11, X7 \ + MOVUPS 4*SIZE(A_PTR)(LDA*1), X12 \ + ADDPS X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_2x4 \ + MOVUPS X5, X6 \ + MULPS X1, X5 \ + MULPS X2, X6 + +#define STORE_2x4 \ + MOVUPS (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPS X11, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVSD X5, X6 \ + MULPS X1, X5 \ + MULPS X2, X6 + +#define STORE_2x2 \ + MOVSD (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVSD (A_PTR)(LDA*1), X8 \ + ADDPS X8, X6 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVSS (Y_PTR), X5 \ + MOVSS X5, X6 \ + MULSS X1, X5 \ + MULSS X2, X6 + +#define STORE_2x1 \ + ADDSS (A_PTR), X5 \ + ADDSS (A_PTR)(LDA*1), X6 \ + MOVSS X5, (A_PTR) \ + MOVSS X6, (A_PTR)(LDA*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x8 \ + MULPS X1, X5 \ + MULPS X1, X6 + +#define STORE_1x8 \ + MOVUPS (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVUPS 4*SIZE(A_PTR), X8 \ + ADDPS X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_1x4 \ + MULPS X1, X5 \ + MULPS X1, X6 + +#define STORE_1x4 \ + MOVUPS (A_PTR), 
X7 \ + ADDPS X7, X5 \ + MOVUPS X5, (A_PTR) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MULPS X1, X5 + +#define STORE_1x2 \ + MOVSD (A_PTR), X6 \ + ADDPS X6, X5 \ + MOVSD X5, (A_PTR) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSS (Y_PTR), X5 \ + MULSS X1, X5 + +#define STORE_1x1 \ + ADDSS (A_PTR), X5 \ + MOVSS X5, (A_PTR) \ + ADDQ $SIZE, A_PTR + +// func Ger(m, n uintptr, alpha float32, +// x []float32, incX uintptr, +// y []float32, incY uintptr, +// a []float32, lda uintptr) +TEXT ·Ger(SB), 0, $16-120 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + LOAD_ALPHA + + MOVQ x_base+24(FP), X_PTR + MOVQ y_base+56(FP), Y_PTR + MOVQ a_base+88(FP), A_ROW + MOVQ A_ROW, A_PTR + MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float32) + SHLQ $BITSIZE, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + + CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) + JNE inc + CMPQ incX+48(FP), $1 // Check for dense vector X (fast-path) + JNE inc + + SHRQ $2, M + JZ r2 + +r4: + + // LOAD 4 + LOAD_SCALED4 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r4c4 + +r4c8: + // 4x8 KERNEL + KERNEL_LOAD8 + KERNEL_4x8 + STORE_4x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r4c8 + +r4c4: + TESTQ $4, N_DIM + JZ r4c2 + + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE_4x4 + + ADDQ $4*SIZE, Y_PTR + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + STORE_4x2 + + ADDQ $2*SIZE, Y_PTR + +r4c1: + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ $SIZE, Y_PTR + +r4end: + ADDQ $4*SIZE, X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + LOAD_SCALED2 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r2c4 + +r2c8: + // 2x8 KERNEL + KERNEL_LOAD8 + KERNEL_2x8 + STORE_2x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r2c8 + +r2c4: + TESTQ $4, N_DIM + JZ r2c2 + + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + STORE_2x4 + + ADDQ $4*SIZE, Y_PTR + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE_2x2 + + ADDQ $2*SIZE, Y_PTR + +r2c1: + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ $SIZE, Y_PTR + +r2end: + ADDQ $2*SIZE, X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD_SCALED1 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r1c4 + +r1c8: + // 1x8 KERNEL + KERNEL_LOAD8 + KERNEL_1x8 + STORE_1x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r1c8 + +r1c4: + TESTQ $4, N_DIM + JZ r1c2 + + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + STORE_1x4 + + ADDQ $4*SIZE, Y_PTR + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + STORE_1x2 + + ADDQ $2*SIZE, Y_PTR + +r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + +end: + RET + +inc: // Algorithm for incY != 0 ( split loads in kernel ) + + MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float32) + SHLQ $BITSIZE, INC_X + MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float32) + SHLQ $BITSIZE, INC_Y + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_X, TMP1 + NEGQ TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + + SHRQ $2, M + JZ inc_r2 
+ +inc_r4: + // LOAD 4 + LOAD_SCALED4_INC + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r4c4 + +inc_r4c8: + // 4x4 KERNEL + KERNEL_LOAD8_INC + KERNEL_4x8 + STORE_4x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r4c8 + +inc_r4c4: + TESTQ $4, N_DIM + JZ inc_r4c2 + + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE_4x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + STORE_4x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r4c1: + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ INC_Y, Y_PTR + +inc_r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + LOAD_SCALED2_INC + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r2c4 + +inc_r2c8: + // 2x8 KERNEL + KERNEL_LOAD8_INC + KERNEL_2x8 + STORE_2x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r2c8 + +inc_r2c4: + TESTQ $4, N_DIM + JZ inc_r2c2 + + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + STORE_2x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE_2x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2c1: + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ INC_Y, Y_PTR + +inc_r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD_SCALED1 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r1c4 + +inc_r1c8: + // 1x8 KERNEL + KERNEL_LOAD8_INC + KERNEL_1x8 + STORE_1x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r1c8 + +inc_r1c4: + TESTQ $4, N_DIM + JZ inc_r1c2 + + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + STORE_1x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + STORE_1x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ inc_end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + +inc_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go new file mode 100644 index 00000000..d92f9968 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go @@ -0,0 +1,36 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f32 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
+func Ger(m, n uintptr, alpha float32, x []float32, incX uintptr, y []float32, incY uintptr, a []float32, lda uintptr) { + + if incX == 1 && incY == 1 { + x = x[:m] + y = y[:n] + for i, xv := range x { + AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) + } + return + } + + var ky, kx uintptr + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + + ix := kx + for i := 0; i < int(m); i++ { + AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], uintptr(n), uintptr(incY), 1, uintptr(ky), 0) + ix += incX + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go new file mode 100644 index 00000000..d0867a46 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go @@ -0,0 +1,55 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package f32 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float32, x []float32) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float32, alpha float32, x []float32) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float32, x []float32, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float32, incDst uintptr, alpha float32, x []float32, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go new file mode 100644 index 00000000..fcbce09e --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go @@ -0,0 +1,68 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
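The pure-Go Ger fallback shown above (ge_noasm.go) expresses the rank-one update row-by-row through Axpy calls; fully expanded, the documented operation A += alpha * x * y^T for the unit-stride case is just the nested loop below (a sketch equivalent to the AxpyUnitary formulation):

    // gerNaive applies A[i][j] += alpha * x[i] * y[j] over an m x n matrix
    // stored row-major with leading dimension lda.
    func gerNaive(m, n int, alpha float32, x, y, a []float32, lda int) {
        for i := 0; i < m; i++ {
            row := a[i*lda : i*lda+n]
            xi := alpha * x[i] // hoist the row-constant factor
            for j, yv := range y[:n] {
                row[j] += xi * yv
            }
        }
    }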
+ +// +build !noasm,!appengine,!safe + +package f32 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float32, x, y []float32) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) + +// DdotUnitary is +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return +func DdotUnitary(x, y []float32) (sum float64) + +// DdotInc is +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return +func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float32) (sum float32) + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go new file mode 100644 index 00000000..3b5b0970 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
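As with the c128 package, the f32 stub declarations above and the pure-Go bodies that follow carry complementary build tags, so exactly one implementation of each function is compiled into any given build. Forcing the fallback can be useful when debugging (hypothetical invocation):

    // stubs_amd64.go:  +build !noasm,!appengine,!safe     -> assembly-backed
    // stubs_noasm.go:  +build !amd64 noasm appengine safe -> pure Go
    //
    //   go build -tags noasm ./...   // selects the pure-Go bodies everywhere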
+ +// +build !amd64 noasm appengine safe + +package f32 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float32, x, y []float32) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float32) (sum float32) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} + +// DdotUnitary is +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return +func DdotUnitary(x, y []float32) (sum float64) { + for i, v := range x { + sum += float64(y[i]) * float64(v) + } + return +} + +// DdotInc is +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return +func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { + for i := 0; i < int(n); i++ { + sum += float64(y[iy]) * float64(x[ix]) + ix += incX + iy += incY + } + return +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s new file mode 100644 index 00000000..d9d61bb7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s @@ -0,0 +1,82 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1Norm(x []float64) float64 +TEXT ·L1Norm(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ x_len+8(FP), CX // CX = len(x) + XORQ AX, AX // i = 0 + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVUPS (SI)(AX*8), X8 // X_i = x[i:i+1] + MOVUPS 16(SI)(AX*8), X9 + MOVUPS 32(SI)(AX*8), X10 + MOVUPS 48(SI)(AX*8), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + ADDQ $8, AX // i += 8 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI)(AX*8), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + INCQ AX // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+24(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s new file mode 100644 index 00000000..cac19aa6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s @@ -0,0 +1,90 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
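L1Norm above accumulates eight partial sums per pass and computes the absolute value branch-free as sum = max(sum+v, sum-v). A scalar sketch of the same reduction (standalone and illustrative; math.Abs stands in for the max trick on finite inputs):

package main

import (
	"fmt"
	"math"
)

// l1Norm mirrors the documented contract of f64.L1Norm:
// the sum of the absolute values of x.
func l1Norm(x []float64) (sum float64) {
	for _, v := range x {
		sum += math.Abs(v)
	}
	return sum
}

func main() {
	fmt.Println(l1Norm([]float64{1, -2, 3, -4})) // 10
}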
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1NormInc(x []float64, n, incX int) (sum float64) +TEXT ·L1NormInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ n+24(FP), CX // CX = n + MOVQ incX+32(FP), AX // AX = increment * sizeof( float64 ) + SHLQ $3, AX + MOVQ AX, DX // DX = AX * 3 + IMULQ $3, DX + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = n % 8 + SHRQ $3, CX // CX = floor( n / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum = max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_i[0] = x[i] + MOVSD (SI)(AX*1), X9 + MOVSD (SI)(AX*2), X10 + MOVSD (SI)(DX*1), X11 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + MOVHPD (SI), X8 // X_i[1] = x[i+4] + MOVHPD (SI)(AX*1), X9 + MOVHPD (SI)(AX*2), X10 + MOVHPD (SI)(DX*1), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + ADDQ AX, SI // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+40(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s new file mode 100644 index 00000000..bc0ea6a4 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s @@ -0,0 +1,66 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Add(dst, s []float64) +TEXT ·Add(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE add_end + XORQ AX, AX + MOVQ DI, BX + ANDQ $0x0F, BX // BX = &dst & 15 + JZ add_no_trim // if BX == 0 { goto add_no_trim } + + // Align on 16-byte boundary + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // i++ + DECQ CX // --CX + JE add_end // if CX == 0 { return } + +add_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ add_tail_start // if CX == 0 { goto add_tail_start } + +add_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD (DI)(AX*8), X0 // X_i += dst[i:i+1] + ADDPD 16(DI)(AX*8), X1 + ADDPD 32(DI)(AX*8), X2 + ADDPD 48(DI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X_i + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP add_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE add_end + +add_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +add_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP add_tail // } while --CX > 0 + +add_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s new file mode 100644 index 00000000..7cc68c78 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s @@ -0,0 +1,53 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AddConst(alpha float64, x []float64) +TEXT ·AddConst(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ x_len+16(FP), CX // CX = len(x) + CMPQ CX, $0 // if len(x) == 0 { return } + JE ac_end + MOVSD alpha+0(FP), X4 // X4 = { a, a } + SHUFPD $0, X4, X4 + MOVUPS X4, X5 // X5 = X4 + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ ac_tail_start // if CX == 0 { goto ac_tail_start } + +ac_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = x[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD X4, X0 // X_i += a + ADDPD X5, X1 + ADDPD X4, X2 + ADDPD X5, X3 + MOVUPS X0, (SI)(AX*8) // x[i:i+1] = X_i + MOVUPS X1, 16(SI)(AX*8) + MOVUPS X2, 32(SI)(AX*8) + MOVUPS X3, 48(SI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP ac_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE ac_end + +ac_tail_start: // Reset loop counters + MOVQ BX, CX // Loop counter: CX = BX + +ac_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = x[i] + ADDSD X4, X0 // X0 += a + MOVSD X0, (SI)(AX*8) // x[i] = X0 + INCQ AX // ++i + LOOP ac_tail // } while --CX > 0 + +ac_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go new file mode 100644 index 00000000..b8322139 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go @@ -0,0 +1,57 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s new file mode 100644 index 00000000..95fe9f90 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s @@ -0,0 +1,142 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
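axpy.go above is the pure-Go fallback that defines the contract the assembly files must match. A sketch of how the strided AxpyInc variant walks both vectors (standalone; the slice layout below is an illustrative assumption):

package main

import "fmt"

// axpyInc mirrors the documented contract of f64.AxpyInc:
// y[iy] += alpha * x[ix], advancing ix and iy by the strides
// incX and incY for n iterations.
func axpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) {
	for i := 0; i < int(n); i++ {
		y[iy] += alpha * x[ix]
		ix += incX
		iy += incY
	}
}

func main() {
	x := []float64{1, 0, 2, 0, 3, 0} // logical vector (1, 2, 3) with stride 2
	y := []float64{10, 20, 30}       // stride 1
	axpyInc(2, x, y, 3, 2, 1, 0, 0)
	fmt.Println(y) // [12 24 36]
}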
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R9 +#define INCx3_DST R12 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR = &y + MOVQ n+56(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+80(FP), INC_X + MOVQ iy+88(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy]) + MOVQ Y_PTR, DST_PTR // DST_PTR = Y_PTR // Write pointer + + MOVQ incX+64(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+72(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVAPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +loop: // do { // y[i] += alpha * x[i] unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + // y[i] += alpha * x[i] for the last n % 4 iterations. + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // y[i] = X2 + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s new file mode 100644 index 00000000..dcb79d87 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s @@ -0,0 +1,148 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DX +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R10 +#define INCx3_DST R13 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+48(FP), X_PTR // X_PTR := &x + MOVQ y_base+72(FP), Y_PTR // Y_PTR := &y + MOVQ n+96(FP), LEN // LEN := n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+120(FP), INC_X + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + MOVQ iy+128(FP), INC_Y + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy]) + MOVQ idst+32(FP), INC_DST + LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(dst[idst]) + + MOVQ incX+104(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+112(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst * sizeof(float64) + SHLQ $3, INC_DST + MOVSD alpha+40(FP), ALPHA + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVSD ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // dst[i] = alpha * x[i] + y[i] unrolled 4x.
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2]) + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // dst[i] = X2 + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s new file mode 100644 index 00000000..bc290a15 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitary(alpha float64, x, y []float64) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR := &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+16(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+40(FP), LEN + CMOVQLE y_len+40(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + XORQ IDX, IDX + MOVSD alpha+0(FP), ALPHA // ALPHA := { alpha, alpha } + SHUFPD $0, ALPHA, ALPHA + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail2_start } + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= a + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if TAIL == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= a + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s new file mode 100644 index 00000000..16798eba --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DX +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+32(FP), X_PTR // X_PTR := &x + MOVQ y_base+56(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+40(FP), LEN // LEN = min( len(x), len(y), len(dst) ) + CMPQ y_len+64(FP), LEN + CMOVQLE y_len+64(FP), LEN + CMPQ dst_len+8(FP), LEN + CMOVQLE dst_len+8(FP), LEN + + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVSD alpha+24(FP), ALPHA + SHUFPD $0, ALPHA, ALPHA // ALPHA := { alpha, alpha } + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. 
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= alpha + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= alpha + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s new file mode 100644 index 00000000..32bd1572 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s @@ -0,0 +1,71 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +TEXT ·CumProd(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cp_end + XORQ AX, AX // i = 0 + + MOVSD (SI), X5 // p_prod = { s[0], s[0] } + SHUFPD $0, X5, X5 + MOVSD X5, (DI) // dst[0] = s[0] + INCQ AX // ++i + DECQ CX // -- CX + JZ cp_end // if CX == 0 { return } + + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cp_tail_start // if CX == 0 { goto cp_tail_start } + +cp_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + MULPD X0, X1 // X1 *= X0 + MULPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + MULPD X5, X0 // X0 *= p_prod + MULPD X1, X5 // p_prod *= X1 + MULPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + MULPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cp_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cp_end + +cp_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cp_tail: // do { + MULSD (SI)(AX*8), X5 // p_prod *= s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_prod + INCQ AX // ++i + LOOP cp_tail // } while --CX > 0 + +cp_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s new file mode 100644 index 00000000..10d7fdab --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s @@ -0,0 +1,64 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
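CumProd above carries the running product in X5 and uses SHUFPD lane swaps to fold two elements per register. Its scalar contract, sketched standalone:

package main

import "fmt"

// cumProd mirrors the documented contract of f64.CumProd:
// dst[i] = s[0] * s[1] * ... * s[i], with the result truncated
// to the shorter of dst and s.
func cumProd(dst, s []float64) []float64 {
	if len(s) < len(dst) {
		dst = dst[:len(s)]
	}
	prod := 1.0
	for i, v := range s[:len(dst)] {
		prod *= v
		dst[i] = prod
	}
	return dst
}

func main() {
	fmt.Println(cumProd(make([]float64, 4), []float64{1, 2, 3, 4})) // [1 2 6 24]
}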
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +TEXT ·CumSum(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cs_end + XORQ AX, AX // i = 0 + PXOR X5, X5 // p_sum = 0 + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cs_tail_start // if CX == 0 { goto cs_tail_start } + +cs_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + ADDPD X0, X1 // X1 += X0 + ADDPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + ADDPD X5, X0 // X0 += p_sum + ADDPD X1, X5 // p_sum += X1 + ADDPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + ADDPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cs_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cs_end + +cs_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cs_tail: // do { + ADDSD (SI)(AX*8), X5 // p_sum += s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_sum + INCQ AX // ++i + LOOP cs_tail // } while --CX > 0 + +cs_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s new file mode 100644 index 00000000..1a4e9eec --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s @@ -0,0 +1,67 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Div(dst, s []float64) +TEXT ·Div(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ SI, BX + ANDQ $15, BX // BX = &s & 15 + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-byte boundary + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (DI)(AX*8), X0 // X0 = dst[i:i+1] + MOVUPS 16(DI)(AX*8), X1 + MOVUPS 32(DI)(AX*8), X2 + MOVUPS 48(DI)(AX*8), X3 + DIVPD (SI)(AX*8), X0 // X0 /= s[i:i+1] + DIVPD 16(SI)(AX*8), X1 + DIVPD 32(SI)(AX*8), X2 + DIVPD 48(SI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP div_tail // } while --CX > 0 + +div_end: + RET + diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s new file mode 100644 index 00000000..16ab9b7e --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s @@ -0,0 +1,73 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func DivTo(dst, x, y []float64) +TEXT ·DivTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ x_base+24(FP), SI // SI = &x + MOVQ y_base+48(FP), DX // DX = &y + CMPQ x_len+32(FP), CX // CX = min( len(dst), len(x), len(y) ) + CMOVQLE x_len+32(FP), CX + CMPQ y_len+56(FP), CX + CMOVQLE y_len+56(FP), CX + MOVQ CX, ret_len+80(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ DX, BX + ANDQ $15, BX // BX = &y & 0xF + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-byte boundary + MOVSD (SI)(AX*8), X0 // X0 = x[i] + DIVSD (DX)(AX*8), X0 // X0 /= y[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X0 = x[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + DIVPD (DX)(AX*8), X0 // X0 /= y[i:i+1] + DIVPD 16(DX)(AX*8), X1 + DIVPD 32(DX)(AX*8), X2 + DIVPD 48(DX)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = x[i] + DIVSD (DX)(AX*8), X0 // X0 /= y[i] + MOVSD X0, (DI)(AX*8) + INCQ AX // ++i + LOOP div_tail // } while --CX > 0 + +div_end: + MOVQ DI, ret_base+72(FP) // &ret = &dst + MOVQ dst_cap+16(FP), DI // cap(ret) = cap(dst) + MOVQ DI, ret_cap+88(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go new file mode 100644 index 00000000..d0d7ae04 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f64 provides float64 vector primitives. +package f64 diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go new file mode 100644 index 00000000..b77138d1 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go @@ -0,0 +1,35 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
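Div and DivTo above share one shape: a scalar pre-loop to reach 16-byte alignment, an 8x-unrolled DIVPD body, and a scalar tail. The DivTo contract in plain Go (standalone sketch):

package main

import "fmt"

// divTo mirrors the documented contract of f64.DivTo:
// dst[i] = x[i] / y[i], sized to the shortest of the three slices.
func divTo(dst, x, y []float64) []float64 {
	n := len(dst)
	if len(x) < n {
		n = len(x)
	}
	if len(y) < n {
		n = len(y)
	}
	dst = dst[:n]
	for i := range dst {
		dst[i] = x[i] / y[i]
	}
	return dst
}

func main() {
	fmt.Println(divTo(make([]float64, 3), []float64{2, 9, 8}, []float64{1, 3, 4})) // [2 3 2]
}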
+ +// +build !amd64 noasm appengine safe + +package f64 + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float64) (sum float64) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s new file mode 100644 index 00000000..eff25059 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s @@ -0,0 +1,145 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func DdotUnitary(x, y []float64) (sum float64) +// This function assumes len(y) >= len(x). +TEXT ·DotUnitary(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ x_len+8(FP), DI // n = len(x) + MOVQ y+24(FP), R9 + + MOVSD $(0.0), X7 // sum = 0 + MOVSD $(0.0), X8 // sum = 0 + + MOVQ $0, SI // i = 0 + SUBQ $4, DI // n -= 4 + JL tail_uni // if n < 0 goto tail_uni + +loop_uni: + // sum += x[i] * y[i] unrolled 4x. 
+ MOVUPD 0(R8)(SI*8), X0 + MOVUPD 0(R9)(SI*8), X1 + MOVUPD 16(R8)(SI*8), X2 + MOVUPD 16(R9)(SI*8), X3 + MULPD X1, X0 + MULPD X3, X2 + ADDPD X0, X7 + ADDPD X2, X8 + + ADDQ $4, SI // i += 4 + SUBQ $4, DI // n -= 4 + JGE loop_uni // if n >= 0 goto loop_uni + +tail_uni: + ADDQ $4, DI // n += 4 + JLE end_uni // if n <= 0 goto end_uni + +onemore_uni: + // sum += x[i] * y[i] for the remaining 1-3 elements. + MOVSD 0(R8)(SI*8), X0 + MOVSD 0(R9)(SI*8), X1 + MULSD X1, X0 + ADDSD X0, X7 + + ADDQ $1, SI // i++ + SUBQ $1, DI // n-- + JNZ onemore_uni // if n != 0 goto onemore_uni + +end_uni: + // Add the four sums together. + ADDPD X8, X7 + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+48(FP) // Return final sum. + RET + +// func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) +TEXT ·DotInc(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ y+24(FP), R9 + MOVQ n+48(FP), CX + MOVQ incX+56(FP), R11 + MOVQ incY+64(FP), R12 + MOVQ ix+72(FP), R13 + MOVQ iy+80(FP), R14 + + MOVSD $(0.0), X7 // sum = 0 + LEAQ (R8)(R13*8), SI // p = &x[ix] + LEAQ (R9)(R14*8), DI // q = &y[iy] + SHLQ $3, R11 // incX *= sizeof(float64) + SHLQ $3, R12 // incY *= sizeof(float64) + + SUBQ $2, CX // n -= 2 + JL tail_inc // if n < 0 goto tail_inc + +loop_inc: + // sum += *p * *q unrolled 2x. + MOVHPD (SI), X0 + MOVHPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + MOVLPD (SI), X0 + MOVLPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + + MULPD X1, X0 + ADDPD X0, X7 + + SUBQ $2, CX // n -= 2 + JGE loop_inc // if n >= 0 goto loop_inc + +tail_inc: + ADDQ $2, CX // n += 2 + JLE end_inc // if n <= 0 goto end_inc + + // sum += *p * *q for the last iteration if n is odd. + MOVSD (SI), X0 + MULSD (DI), X0 + ADDSD X0, X7 + +end_inc: + // Add the two sums together. + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+88(FP) // Return final sum. + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go new file mode 100644 index 00000000..00c99e93 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go @@ -0,0 +1,22 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f64 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) + +// GemvN computes +// y = alpha * A * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) + +// GemvT computes +// y = alpha * A^T * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars.
+func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go new file mode 100644 index 00000000..5a2c1d35 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go @@ -0,0 +1,118 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) { + if incX == 1 && incY == 1 { + x = x[:m] + y = y[:n] + for i, xv := range x { + AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) + } + return + } + + var ky, kx uintptr + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + + ix := kx + for i := 0; i < int(m); i++ { + AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], n, incY, 1, ky, 0) + ix += incX + } +} + +// GemvN computes +// y = alpha * A * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(n-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(m-1) * int(incY)) + } + + if incX == 1 && incY == 1 { + if beta == 0 { + for i = 0; i < m; i++ { + y[i] = alpha * DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + for i = 0; i < m; i++ { + y[i] = y[i]*beta + alpha*DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + iy := ky + if beta == 0 { + for i = 0; i < m; i++ { + y[iy] = alpha * DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } + return + } + for i = 0; i < m; i++ { + y[iy] = y[iy]*beta + alpha*DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } +} + +// GemvT computes +// y = alpha * A^T * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
+func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + switch { + case beta == 0: // beta == 0 is special-cased to memclear + if incY == 1 { + for i := range y { + y[i] = 0 + } + } else { + iy := ky + for i := 0; i < int(n); i++ { + y[iy] = 0 + iy += incY + } + } + case int(incY) < 0: + ScalInc(beta, y, n, uintptr(int(-incY))) + case incY == 1: + ScalUnitary(beta, y) + default: + ScalInc(beta, y, n, incY) + } + + if incX == 1 && incY == 1 { + for i = 0; i < m; i++ { + AxpyUnitaryTo(y, alpha*x[i], a[lda*i:lda*i+n], y) + } + return + } + ix := kx + for i = 0; i < m; i++ { + AxpyInc(alpha*x[ix], a[lda*i:lda*i+n], y, n, 1, incY, 0, ky) + ix += incX + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s new file mode 100644 index 00000000..2abdddd8 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s @@ -0,0 +1,685 @@ +// Copyright ©2017 The gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define INC_X R8 +#define INC3_X R9 + +#define Y_PTR DX +#define Y y_base+96(FP) +#define INC_Y R10 +#define INC3_Y R11 + +#define A_ROW AX +#define A_PTR DI +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + XORPS X0, X0 \ + XORPS X1, X1 \ + XORPS X2, X2 \ + XORPS X3, X3 + +#define INIT2 \ + XORPS X0, X0 \ + XORPS X1, X1 + +#define INIT1 \ + XORPS X0, X0 + +#define KERNEL_LOAD4 \ + MOVUPS (X_PTR), X12 \ + MOVUPS 2*SIZE(X_PTR), X13 + +#define KERNEL_LOAD2 \ + MOVUPS (X_PTR), X12 + +#define KERNEL_LOAD4_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 \ + MOVSD (X_PTR)(INC_X*2), X13 \ + MOVHPD (X_PTR)(INC3_X*1), X13 + +#define KERNEL_LOAD2_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MOVUPS (A_PTR)(LDA*2), X8 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X9 \ + MOVUPS (A_PTR)(LDA3*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X11 \ + MULPD X12, X4 \ + MULPD X13, X5 \ + MULPD X12, X6 \ + MULPD X13, X7 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X1 \ + ADDPD X7, X1 \ + ADDPD X8, X2 \ + ADDPD X9, X2 \ + ADDPD X10, X3 \ + ADDPD X11, X3 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + MULPD X12, X6 \ + MULPD X12, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X2 \ + ADDPD X7, X3 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X4 \ + MOVHPD (A_PTR)(LDA*1), X4 \ + MOVSD (A_PTR)(LDA*2), X5 \ + MOVHPD (A_PTR)(LDA3*1), X5 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X2 \ + ADDQ $SIZE, A_PTR + 
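The pure-Go GemvN above reduces each output element to a dot product of one matrix row with x; the assembly that follows blocks the same computation into 4x4 kernels. A minimal sketch of the unit-stride case, assuming row-major A with leading dimension lda:

package main

import "fmt"

// gemvN mirrors the unit-stride case of f64.GemvN:
// y = alpha*A*x + beta*y for an m×n row-major matrix A
// with leading dimension lda.
func gemvN(m, n int, alpha float64, a []float64, lda int, x []float64, beta float64, y []float64) {
	for i := 0; i < m; i++ {
		var dot float64
		for j := 0; j < n; j++ {
			dot += a[i*lda+j] * x[j]
		}
		y[i] = beta*y[i] + alpha*dot
	}
}

func main() {
	a := []float64{ // 2×3 matrix, row-major, lda = 3
		1, 2, 3,
		4, 5, 6,
	}
	x := []float64{1, 1, 1}
	y := make([]float64, 2)
	gemvN(2, 3, 1, a, 3, x, 0, y)
	fmt.Println(y) // [6 15]
}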
+#define STORE4 \ + MOVUPS (Y_PTR), X4 \ + MOVUPS 2*SIZE(Y_PTR), X5 \ + MULPD ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVUPS X4, (Y_PTR) \ + MOVUPS X5, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD (Y_PTR)(INC_Y*1), X4 \ + MOVSD (Y_PTR)(INC_Y*2), X5 \ + MOVHPD (Y_PTR)(INC3_Y*1), X5 \ + MULPD ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVLPD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) \ + MOVLPD X5, (Y_PTR)(INC_Y*2) \ + MOVHPD X5, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X11 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X8, X0 \ + ADDPD X10, X1 \ + ADDPD X9, X0 \ + ADDPD X11, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X8 \ + MOVUPS (A_PTR)(LDA*1), X9 \ + MULPD X12, X8 \ + MULPD X12, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X1 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MOVHPD (A_PTR)(LDA*1), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE2 \ + MOVUPS (Y_PTR), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVUPS X4, (Y_PTR) + +#define STORE2_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD (Y_PTR)(INC_Y*1), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVSD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X0 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVUPS (A_PTR), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MULSD X12, X8 \ + ADDSD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE1 \ + HADDPD X0, X0 \ + MOVSD (Y_PTR), X4 \ + MULSD ALPHA, X0 \ + MULSD BETA, X4 \ + ADDSD X0, X4 \ + MOVSD X4, (Y_PTR) + +// func GemvN(m, n int, +// alpha float64, +// a []float64, lda int, +// x []float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvN(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + MOVDDUP beta+88(FP), BETA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ incY+120(FP), INC_Y + MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + MOVQ Y_PTR, Y + + SHLQ $3, INC_Y // INC_Y = incY * sizeof(float64) + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + MOVSD $0.0, X0 + COMISD BETA, X0 + JNE gemv_start // if beta != 0 { goto gemv_start } + +gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + CMPQ incY+120(FP), $1 // Check for dense vector X (fast-path) + JNE inc_clear + + SHRQ $3, M + JZ clear4 + +clear8: + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + MOVUPS X2, 32(Y_PTR) + MOVUPS X3, 48(Y_PTR) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ clear8 + +clear4: + TESTQ $4, M_DIM + JZ clear2 + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + ADDQ $4*SIZE, Y_PTR + +clear2: + TESTQ $2, M_DIM + JZ clear1 + MOVUPS 
X0, (Y_PTR) + ADDQ $2*SIZE, Y_PTR + +clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + + JMP prep_end + +inc_clear: + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + +prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +gemv_start: + CMPQ incX+80(FP), $1 // Check for dense vector X (fast-path) + JNE inc + + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r4c4 + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + + ADDQ $2*SIZE, X_PTR + +r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ $SIZE, X_PTR + +r4end: + CMPQ INC_Y, $SIZE + JNZ r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP r4inc + +r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + + ADDQ $2*SIZE, X_PTR + +r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ $SIZE, X_PTR + +r2end: + CMPQ INC_Y, $SIZE + JNE r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP r2inc + +r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + + ADDQ $2*SIZE, X_PTR + +r1c1: + + TESTQ $1, N_DIM + JZ r1end + + // 1x1 KERNEL + KERNEL_1x1 + +r1end: + STORE1 + +end: + RET + +inc: // Algorithm for incX != 1 ( split loads in kernel ) + MOVQ incX+80(FP), INC_X // INC_X = incX + + XORQ TMP2, TMP2 // TMP2 = 0 + MOVQ N, TMP1 // TMP1 = N + SUBQ $1, TMP1 // TMP1 -= 1 + NEGQ TMP1 // TMP1 = -TMP1 + IMULQ INC_X, TMP1 // TMP1 *= INC_X + CMPQ INC_X, $0 // if INC_X < 0 { TMP2 = TMP1 } + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR // X_PTR = X_PTR[TMP2] + MOVQ X_PTR, X // X = X_PTR + + SHLQ $3, INC_X + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + + DECQ N + JNZ inc_r4c4 + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ INC_X, X_PTR + +inc_r4end: + CMPQ INC_Y, $SIZE + JNE inc_r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP inc_r4inc + +inc_r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW 
+ MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ INC_X, X_PTR + +inc_r2end: + CMPQ INC_Y, $SIZE + JNE inc_r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP inc_r2inc + +inc_r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ inc_r1end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_r1end: + STORE1 + +inc_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s new file mode 100644 index 00000000..87ba5cbf --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s @@ -0,0 +1,745 @@ +// Copyright ©2017 The gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM n+8(FP) +#define M CX +#define N_DIM m+0(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define Y_PTR DX +#define Y y_base+96(FP) +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MOVDDUP (X_PTR)(INC_X*2), X10 \ + MOVDDUP (X_PTR)(INC3_X*1), X11 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 \ + MULPD ALPHA, X10 \ + MULPD ALPHA, X11 + +#define INIT2 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 + +#define INIT1 \ + MOVDDUP (X_PTR), X8 \ + MULPD ALPHA, X8 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X0 \ + MOVUPS 2*SIZE(Y_PTR), X1 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X0 + +#define KERNEL_LOAD4_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD (Y_PTR)(INC_Y*1), X0 \ + MOVSD (Y_PTR)(INC_Y*2), X1 \ + MOVHPD (Y_PTR)(INC3_Y*1), X1 + +#define KERNEL_LOAD2_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD (Y_PTR)(INC_Y*1), X0 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + MOVUPS (A_PTR)(LDA*2), X4 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X5 \ + MOVUPS (A_PTR)(LDA3*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X7 \ + MULPD X10, X4 \ + MULPD X10, X5 \ + MULPD X11, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + 
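+// Editorial note: GemvN and GemvT implement the BLAS matrix-vector products
+// y = alpha * A * x + beta * y and y = alpha * A^T * x + beta * y with 4x4
+// SSE2 kernels plus 4x2, 4x1, 2x and 1x edge cases. A non-authoritative Go
+// sketch of the GemvN semantics, in the doc-comment style of the f64 stubs
+// (indices shown for positive increments; the assembly pre-adjusts the base
+// pointers when an increment is negative):
+//  for i := 0; i < m; i++ {
+//  	var sum float64
+//  	for j := 0; j < n; j++ {
+//  		sum += a[i*lda+j] * x[j*incX]
+//  	}
+//  	y[i*incY] = alpha*sum + beta*y[i*incY]
+//  }
+// GemvT applies the same recurrence to A^T. In both, beta == 0 clears y with
+// plain stores rather than scaling it, so a NaN or Inf already in y is not
+// propagated.
+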
+#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDQ $4*SIZE, A_PTR + +#define STORE4 \ + MOVUPS X0, (Y_PTR) \ + MOVUPS X1, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) \ + MOVLPD X1, (Y_PTR)(INC_Y*2) \ + MOVHPD X1, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + MULPD X10, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X0 \ + ADDPD X7, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVUPS (A_PTR), X4 \ + MULPD X8, X4 \ + ADDPD X4, X0 \ + ADDQ $2*SIZE, A_PTR + +#define STORE2 \ + MOVUPS X0, (Y_PTR) + +#define STORE2_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MOVSD (A_PTR)(LDA*2), X6 \ + MOVSD (A_PTR)(LDA3*1), X7 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + MULSD X10, X6 \ + MULSD X11, X7 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + ADDSD X6, X0 \ + ADDSD X7, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MULSD X8, X4 \ + ADDSD X4, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define SCALE_8(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MOVUPS 32(PTR), X2 \ + MOVUPS 48(PTR), X3 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MULPD SCAL, X2 \ + MULPD SCAL, X3 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + MOVUPS X2, 32(PTR) \ + MOVUPS X3, 48(PTR) + +#define SCALE_4(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + +#define SCALE_2(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MULPD SCAL, X0 \ + MOVUPS X0, (PTR) \ + +#define SCALE_1(PTR, SCAL) \ + MOVSD (PTR), X0 \ + MULSD SCAL, X0 \ + MOVSD X0, (PTR) \ + +#define SCALEINC_4(PTR, INC, INC3, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MOVSD (PTR)(INC*2), X2 \ + MOVSD (PTR)(INC3*1), X3 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MULSD SCAL, X2 \ + MULSD SCAL, X3 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) \ + MOVSD X2, (PTR)(INC*2) \ + MOVSD X3, (PTR)(INC3*1) + +#define SCALEINC_2(PTR, INC, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) + +// func GemvT(m, n int, +// alpha float64, +// a []float64, lda int, +// x []float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvT(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ 
incY+120(FP), INC_Y // INC_Y = incY * sizeof(float64)
+ MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64)
+ SHLQ $3, LDA
+ LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3
+ MOVQ A_ROW, A_PTR
+
+ MOVQ incX+80(FP), INC_X // INC_X = incX * sizeof(float64)
+
+ XORQ TMP2, TMP2
+ MOVQ N, TMP1
+ SUBQ $1, TMP1
+ NEGQ TMP1
+ IMULQ INC_X, TMP1
+ CMPQ INC_X, $0
+ CMOVQLT TMP1, TMP2
+ LEAQ (X_PTR)(TMP2*SIZE), X_PTR
+ MOVQ X_PTR, X
+
+ SHLQ $3, INC_X
+ LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3
+
+ CMPQ incY+120(FP), $1 // Check for dense vector Y (fast-path)
+ JNE inc
+
+ MOVSD $1.0, X0
+ COMISD beta+88(FP), X0
+ JE gemv_start
+
+ MOVSD $0.0, X0
+ COMISD beta+88(FP), X0
+ JE gemv_clear
+
+ MOVDDUP beta+88(FP), BETA
+ SHRQ $3, M
+ JZ scal4
+
+scal8:
+ SCALE_8(Y_PTR, BETA)
+ ADDQ $8*SIZE, Y_PTR
+ DECQ M
+ JNZ scal8
+
+scal4:
+ TESTQ $4, M_DIM
+ JZ scal2
+ SCALE_4(Y_PTR, BETA)
+ ADDQ $4*SIZE, Y_PTR
+
+scal2:
+ TESTQ $2, M_DIM
+ JZ scal1
+ SCALE_2(Y_PTR, BETA)
+ ADDQ $2*SIZE, Y_PTR
+
+scal1:
+ TESTQ $1, M_DIM
+ JZ prep_end
+ SCALE_1(Y_PTR, BETA)
+
+ JMP prep_end
+
+gemv_clear: // beta == 0 is special cased to clear memory (no nan handling)
+ XORPS X0, X0
+ XORPS X1, X1
+ XORPS X2, X2
+ XORPS X3, X3
+
+ SHRQ $3, M
+ JZ clear4
+
+clear8:
+ MOVUPS X0, (Y_PTR)
+ MOVUPS X1, 16(Y_PTR)
+ MOVUPS X2, 32(Y_PTR)
+ MOVUPS X3, 48(Y_PTR)
+ ADDQ $8*SIZE, Y_PTR
+ DECQ M
+ JNZ clear8
+
+clear4:
+ TESTQ $4, M_DIM
+ JZ clear2
+ MOVUPS X0, (Y_PTR)
+ MOVUPS X1, 16(Y_PTR)
+ ADDQ $4*SIZE, Y_PTR
+
+clear2:
+ TESTQ $2, M_DIM
+ JZ clear1
+ MOVUPS X0, (Y_PTR)
+ ADDQ $2*SIZE, Y_PTR
+
+clear1:
+ TESTQ $1, M_DIM
+ JZ prep_end
+ MOVSD X0, (Y_PTR)
+
+prep_end:
+ MOVQ Y, Y_PTR
+ MOVQ M_DIM, M
+
+gemv_start:
+ SHRQ $2, N
+ JZ c2
+
+c4:
+ // LOAD 4
+ INIT4
+
+ MOVQ M_DIM, M
+ SHRQ $2, M
+ JZ c4r2
+
+c4r4:
+ // 4x4 KERNEL
+ KERNEL_LOAD4
+ KERNEL_4x4
+ STORE4
+
+ ADDQ $4*SIZE, Y_PTR
+
+ DECQ M
+ JNZ c4r4
+
+c4r2:
+ TESTQ $2, M_DIM
+ JZ c4r1
+
+ // 4x2 KERNEL
+ KERNEL_LOAD2
+ KERNEL_2x4
+ STORE2
+
+ ADDQ $2*SIZE, Y_PTR
+
+c4r1:
+ TESTQ $1, M_DIM
+ JZ c4end
+
+ // 4x1 KERNEL
+ KERNEL_1x4
+
+ ADDQ $SIZE, Y_PTR
+
+c4end:
+ LEAQ (X_PTR)(INC_X*4), X_PTR
+ MOVQ Y, Y_PTR
+ LEAQ (A_ROW)(LDA*4), A_ROW
+ MOVQ A_ROW, A_PTR
+
+ DECQ N
+ JNZ c4
+
+c2:
+ TESTQ $2, N_DIM
+ JZ c1
+
+ // LOAD 2
+ INIT2
+
+ MOVQ M_DIM, M
+ SHRQ $2, M
+ JZ c2r2
+
+c2r4:
+ // 2x4 KERNEL
+ KERNEL_LOAD4
+ KERNEL_4x2
+ STORE4
+
+ ADDQ $4*SIZE, Y_PTR
+
+ DECQ M
+ JNZ c2r4
+
+c2r2:
+ TESTQ $2, M_DIM
+ JZ c2r1
+
+ // 2x2 KERNEL
+ KERNEL_LOAD2
+ KERNEL_2x2
+ STORE2
+
+ ADDQ $2*SIZE, Y_PTR
+
+c2r1:
+ TESTQ $1, M_DIM
+ JZ c2end
+
+ // 2x1 KERNEL
+ KERNEL_1x2
+
+ ADDQ $SIZE, Y_PTR
+
+c2end:
+ LEAQ (X_PTR)(INC_X*2), X_PTR
+ MOVQ Y, Y_PTR
+ LEAQ (A_ROW)(LDA*2), A_ROW
+ MOVQ A_ROW, A_PTR
+
+c1:
+ TESTQ $1, N_DIM
+ JZ end
+
+ // LOAD 1
+ INIT1
+
+ MOVQ M_DIM, M
+ SHRQ $2, M
+ JZ c1r2
+
+c1r4:
+ // 1x4 KERNEL
+ KERNEL_LOAD4
+ KERNEL_4x1
+ STORE4
+
+ ADDQ $4*SIZE, Y_PTR
+
+ DECQ M
+ JNZ c1r4
+
+c1r2:
+ TESTQ $2, M_DIM
+ JZ c1r1
+
+ // 1x2 KERNEL
+ KERNEL_LOAD2
+ KERNEL_2x1
+ STORE2
+
+ ADDQ $2*SIZE, Y_PTR
+
+c1r1:
+ TESTQ $1, M_DIM
+ JZ end
+
+ // 1x1 KERNEL
+ KERNEL_1x1
+
+end:
+ RET
+
+inc: // Algorithm for incY != 1 ( split loads in kernel )
+ XORQ TMP2, TMP2
+ MOVQ M, TMP1
+ SUBQ $1, TMP1
+ IMULQ INC_Y, TMP1
+ NEGQ TMP1
+ CMPQ INC_Y, $0
+ CMOVQLT TMP1, TMP2
+ LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR
+ MOVQ Y_PTR, Y
+
+ SHLQ $3, INC_Y
+ LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3
+
+ MOVSD $1.0, X0
+ COMISD beta+88(FP), X0
+ JE inc_gemv_start
+
+ MOVSD $0.0, X0
+ COMISD beta+88(FP), X0
+
JE inc_gemv_clear + + MOVDDUP beta+88(FP), BETA + SHRQ $2, M + JZ inc_scal2 + +inc_scal4: + SCALEINC_4(Y_PTR, INC_Y, INC3_Y, BETA) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_scal4 + +inc_scal2: + TESTQ $2, M_DIM + JZ inc_scal1 + + SCALEINC_2(Y_PTR, INC_Y, BETA) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_scal1: + TESTQ $1, M_DIM + JZ inc_prep_end + SCALE_1(Y_PTR, BETA) + + JMP inc_prep_end + +inc_gemv_clear: // beta == 0 is special-cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ inc_prep_end + MOVSD X0, (Y_PTR) + +inc_prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +inc_gemv_start: + SHRQ $2, N + JZ inc_c2 + +inc_c4: + // LOAD 4 + INIT4 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c4r2 + +inc_c4r4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + + DECQ M + JNZ inc_c4r4 + +inc_c4r2: + TESTQ $2, M_DIM + JZ inc_c4r1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x4 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c4r1: + TESTQ $1, M_DIM + JZ inc_c4end + + // 4x1 KERNEL + KERNEL_1x4 + + ADDQ INC_Y, Y_PTR + +inc_c4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ N + JNZ inc_c4 + +inc_c2: + TESTQ $2, N_DIM + JZ inc_c1 + + // LOAD 2 + INIT2 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c2r2 + +inc_c2r4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x2 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c2r4 + +inc_c2r2: + TESTQ $2, M_DIM + JZ inc_c2r1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c2r1: + TESTQ $1, M_DIM + JZ inc_c2end + + // 2x1 KERNEL + KERNEL_1x2 + + ADDQ INC_Y, Y_PTR + +inc_c2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_c1: + TESTQ $1, N_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c1r2 + +inc_c1r4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x1 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c1r4 + +inc_c1r2: + TESTQ $2, M_DIM + JZ inc_c1r1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x1 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c1r1: + TESTQ $1, M_DIM + JZ inc_end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s new file mode 100644 index 00000000..8c1b36a6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s @@ -0,0 +1,591 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
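+
+// Editorial note: Ger below implements the BLAS rank-one update
+// A += alpha * x * y^T using 4x4 SSE2 kernels. A non-authoritative Go sketch
+// of the semantics, indices shown for positive increments:
+//  for i := 0; i < int(m); i++ {
+//  	tmp := alpha * x[i*int(incX)]
+//  	for j := 0; j < int(n); j++ {
+//  		a[i*int(lda)+j] += tmp * y[j*int(incY)]
+//  	}
+//  }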
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define Y y_base+56(FP) +#define Y_PTR DX +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X0 + +#define LOAD4 \ + PREFETCHNTA (X_PTR )(INC_X*8) \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MOVDDUP (X_PTR)(INC_X*2), X3 \ + MOVDDUP (X_PTR)(INC3_X*1), X4 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 \ + MULPD ALPHA, X3 \ + MULPD ALPHA, X4 + +#define LOAD2 \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 + +#define LOAD1 \ + MOVDDUP (X_PTR), X1 \ + MULPD ALPHA, X1 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X5 \ + MOVUPS 2*SIZE(Y_PTR), X6 + +#define KERNEL_LOAD4_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD (Y_PTR)(INC_Y*1), X5 \ + MOVLPD (Y_PTR)(INC_Y*2), X6 \ + MOVHPD (Y_PTR)(INC3_Y*1), X6 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X5 + +#define KERNEL_LOAD2_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD (Y_PTR)(INC_Y*1), X5 + +#define KERNEL_4x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MOVUPS X5, X9 \ + MOVUPS X6, X10 \ + MOVUPS X5, X11 \ + MOVUPS X6, X12 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 \ + MULPD X3, X9 \ + MULPD X3, X10 \ + MULPD X4, X11 \ + MULPD X4, X12 + +#define STORE_4x4 \ + MOVUPS (A_PTR), X13 \ + ADDPD X13, X5 \ + MOVUPS 2*SIZE(A_PTR), X14 \ + ADDPD X14, X6 \ + MOVUPS (A_PTR)(LDA*1), X15 \ + ADDPD X15, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X0 \ + ADDPD X0, X8 \ + MOVUPS (A_PTR)(LDA*2), X13 \ + ADDPD X13, X9 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X14 \ + ADDPD X14, X10 \ + MOVUPS (A_PTR)(LDA3*1), X15 \ + ADDPD X15, X11 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X0 \ + ADDPD X0, X12 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ + MOVUPS X9, (A_PTR)(LDA*2) \ + MOVUPS X10, 2*SIZE(A_PTR)(LDA*2) \ + MOVUPS X11, (A_PTR)(LDA3*1) \ + MOVUPS X12, 2*SIZE(A_PTR)(LDA3*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPD X1, X5 \ + MULPD X2, X6 \ + MULPD X3, X7 \ + MULPD X4, X8 + +#define STORE_4x2 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*2), X11 \ + ADDPD X11, X7 \ + MOVUPS (A_PTR)(LDA3*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + MOVUPS X7, (A_PTR)(LDA*2) \ + MOVUPS X8, (A_PTR)(LDA3*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MOVSD X5, X7 \ + MOVSD X5, X8 \ + MULSD X1, X5 \ + MULSD X2, X6 \ + MULSD X3, X7 \ + MULSD X4, X8 + +#define STORE_4x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + ADDSD (A_PTR)(LDA*2), X7 \ + ADDSD (A_PTR)(LDA3*1), X8 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + MOVSD X7, (A_PTR)(LDA*2) \ + MOVSD X8, (A_PTR)(LDA3*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_2x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 + +#define STORE_2x4 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS 2*SIZE(A_PTR), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPD X11, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 
2*SIZE(A_PTR)(LDA*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS X5, X6 \ + MULPD X1, X5 \ + MULPD X2, X6 + +#define STORE_2x2 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS (A_PTR)(LDA*1), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MULSD X1, X5 \ + MULSD X2, X6 + +#define STORE_2x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x4 \ + MULPD X1, X5 \ + MULPD X1, X6 + +#define STORE_1x4 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS 2*SIZE(A_PTR), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MULPD X1, X5 + +#define STORE_1x2 \ + MOVUPS (A_PTR), X6 \ + ADDPD X6, X5 \ + MOVUPS X5, (A_PTR) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X5 \ + MULSD X1, X5 + +#define STORE_1x1 \ + ADDSD (A_PTR), X5 \ + MOVSD X5, (A_PTR) \ + ADDQ $SIZE, A_PTR + +// func Ger(m, n uintptr, alpha float64, +// x []float64, incX uintptr, +// y []float64, incY uintptr, +// a []float64, lda uintptr) +TEXT ·Ger(SB), NOSPLIT, $0 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+24(FP), X_PTR + MOVQ y_base+56(FP), Y_PTR + MOVQ a_base+88(FP), A_ROW + MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_X, TMP1 + NEGQ TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + + CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) + JG inc + JL end + + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE_4x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + STORE_4x2 + + ADDQ $2*SIZE, Y_PTR + +r4c1: + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ $SIZE, Y_PTR + +r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + STORE_2x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE_2x2 + + ADDQ $2*SIZE, Y_PTR + +r2c1: + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ $SIZE, Y_PTR + +r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + STORE_1x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + STORE_1x2 + + ADDQ $2*SIZE, Y_PTR + +r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ $SIZE, Y_PTR + +end: + RET + 
+inc: // Algorithm for incY != 1 ( split loads in kernel ) + + MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE_4x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + STORE_4x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r4c1: + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ INC_Y, Y_PTR + +inc_r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + STORE_2x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE_2x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2c1: + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ INC_Y, Y_PTR + +inc_r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + STORE_1x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + STORE_1x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ INC_Y, Y_PTR + +inc_end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s new file mode 100644 index 00000000..f87f856c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s @@ -0,0 +1,58 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
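+
+// Editorial note: L1Dist below computes the taxicab distance
+// sum_i |s[i] - t[i]| over the shorter of the two slices, unrolled 2x with
+// SSE2; taking MAXPD of the two subtraction orders yields the absolute
+// difference without a separate abs. A non-authoritative Go sketch:
+//  var norm float64
+//  for i, v := range s {
+//  	norm += math.Abs(t[i] - v)
+//  }
+//  return norm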
+
+// +build !noasm,!appengine,!safe
+
+#include "textflag.h"
+
+// func L1Dist(s, t []float64) float64
+TEXT ·L1Dist(SB), NOSPLIT, $0
+ MOVQ s_base+0(FP), DI // DI = &s
+ MOVQ t_base+24(FP), SI // SI = &t
+ MOVQ s_len+8(FP), CX // CX = len(s)
+ CMPQ t_len+32(FP), CX // CX = min( CX, len(t) )
+ CMOVQLE t_len+32(FP), CX
+ PXOR X3, X3 // norm = 0
+ CMPQ CX, $0 // if CX == 0 { return 0 }
+ JE l1_end
+ XORQ AX, AX // i = 0
+ MOVQ CX, BX
+ ANDQ $1, BX // BX = CX % 2
+ SHRQ $1, CX // CX = floor( CX / 2 )
+ JZ l1_tail_start // if CX == 0 { goto l1_tail_start }
+
+l1_loop: // Loop unrolled 2x do {
+ MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+2]
+ MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+2]
+ MOVAPS X0, X2
+ SUBPD X1, X0
+ SUBPD X2, X1
+ MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
+ ADDPD X0, X3 // norm += X0
+ ADDQ $2, AX // i += 2
+ LOOP l1_loop // } while --CX > 0
+ CMPQ BX, $0 // if BX == 0 { return }
+ JE l1_end
+
+l1_tail_start: // Reset loop registers
+ MOVQ BX, CX // Loop counter: CX = BX
+ PXOR X0, X0 // reset X0, X1 to break dependencies
+ PXOR X1, X1
+
+l1_tail:
+ MOVSD (SI)(AX*8), X0 // X0 = t[i]
+ MOVSD (DI)(AX*8), X1 // X1 = s[i]
+ MOVAPD X0, X2
+ SUBSD X1, X0
+ SUBSD X2, X1
+ MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
+ ADDSD X0, X3 // norm += X0
+
+l1_end:
+ MOVAPS X3, X2
+ SHUFPD $1, X2, X2
+ ADDSD X3, X2 // X2 = X3[1] + X3[0]
+ MOVSD X2, ret+48(FP) // return X2
+ RET
+
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s
new file mode 100644
index 00000000..b0625928
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s
@@ -0,0 +1,57 @@
+// Copyright ©2016 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
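+
+// Editorial note: LinfDist below computes the Chebyshev distance
+// max_i |s[i] - t[i]| with the same 2x-unrolled layout as l1norm_amd64.s
+// (the l1_* labels are carried over), accumulating with MAXPD/MAXSD instead
+// of ADDPD/ADDSD. A non-authoritative Go sketch:
+//  var norm float64
+//  for i, v := range s {
+//  	norm = math.Max(norm, math.Abs(t[i]-v))
+//  }
+//  return norm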
+
+// +build !noasm,!appengine,!safe
+
+#include "textflag.h"
+
+// func LinfDist(s, t []float64) float64
+TEXT ·LinfDist(SB), NOSPLIT, $0
+ MOVQ s_base+0(FP), DI // DI = &s
+ MOVQ t_base+24(FP), SI // SI = &t
+ MOVQ s_len+8(FP), CX // CX = len(s)
+ CMPQ t_len+32(FP), CX // CX = min( CX, len(t) )
+ CMOVQLE t_len+32(FP), CX
+ PXOR X3, X3 // norm = 0
+ CMPQ CX, $0 // if CX == 0 { return 0 }
+ JE l1_end
+ XORQ AX, AX // i = 0
+ MOVQ CX, BX
+ ANDQ $1, BX // BX = CX % 2
+ SHRQ $1, CX // CX = floor( CX / 2 )
+ JZ l1_tail_start // if CX == 0 { goto l1_tail_start }
+
+l1_loop: // Loop unrolled 2x do {
+ MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+2]
+ MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+2]
+ MOVAPS X0, X2
+ SUBPD X1, X0
+ SUBPD X2, X1
+ MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
+ MAXPD X0, X3 // norm = max( norm, X0 )
+ ADDQ $2, AX // i += 2
+ LOOP l1_loop // } while --CX > 0
+ CMPQ BX, $0 // if BX == 0 { return }
+ JE l1_end
+
+l1_tail_start: // Reset loop registers
+ MOVQ BX, CX // Loop counter: CX = BX
+ PXOR X0, X0 // reset X0, X1 to break dependencies
+ PXOR X1, X1
+
+l1_tail:
+ MOVSD (SI)(AX*8), X0 // X0 = t[i]
+ MOVSD (DI)(AX*8), X1 // X1 = s[i]
+ MOVAPD X0, X2
+ SUBSD X1, X0
+ SUBSD X2, X1
+ MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 )
+ MAXSD X0, X3 // norm = max( norm, X0 )
+
+l1_end:
+ MOVAPS X3, X2
+ SHUFPD $1, X2, X2
+ MAXSD X3, X2 // X2 = max( X3[1], X3[0] )
+ MOVSD X2, ret+48(FP) // return X2
+ RET
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go
new file mode 100644
index 00000000..3cc7aca6
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go
@@ -0,0 +1,57 @@
+// Copyright ©2016 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 noasm appengine safe
+
+package f64
+
+// ScalUnitary is
+//  for i := range x {
+//  	x[i] *= alpha
+//  }
+func ScalUnitary(alpha float64, x []float64) {
+	for i := range x {
+		x[i] *= alpha
+	}
+}
+
+// ScalUnitaryTo is
+//  for i, v := range x {
+//  	dst[i] = alpha * v
+//  }
+func ScalUnitaryTo(dst []float64, alpha float64, x []float64) {
+	for i, v := range x {
+		dst[i] = alpha * v
+	}
+}
+
+// ScalInc is
+//  var ix uintptr
+//  for i := 0; i < int(n); i++ {
+//  	x[ix] *= alpha
+//  	ix += incX
+//  }
+func ScalInc(alpha float64, x []float64, n, incX uintptr) {
+	var ix uintptr
+	for i := 0; i < int(n); i++ {
+		x[ix] *= alpha
+		ix += incX
+	}
+}
+
+// ScalIncTo is
+//  var idst, ix uintptr
+//  for i := 0; i < int(n); i++ {
+//  	dst[idst] = alpha * x[ix]
+//  	ix += incX
+//  	idst += incDst
+//  }
+func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) {
+	var idst, ix uintptr
+	for i := 0; i < int(n); i++ {
+		dst[idst] = alpha * x[ix]
+		ix += incX
+		idst += incDst
+	}
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s
new file mode 100644
index 00000000..fb8b545e
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s
@@ -0,0 +1,113 @@
+// Copyright ©2016 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalInc(alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalInc(SB), NOSPLIT, $0 + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ incX+40(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + MOVQ n+32(FP), LEN // LEN = n + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + MOVSD X4, (X_PTR)(INC_X*2) + MOVSD X5, (X_PTR)(INCx3_X*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: // do { + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (X_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s new file mode 100644 index 00000000..186fd1c0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s @@ -0,0 +1,122 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define DST_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define INC_DST R10 +#define INCx3_DST R11 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst + SHLQ $3, INC_DST // INC_DST *= sizeof(float64) + MOVSD alpha+32(FP), ALPHA // ALPHA = alpha + MOVQ x_base+40(FP), X_PTR // X_PTR = &x + MOVQ n+64(FP), LEN // LEN = n + MOVQ incX+72(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s new file mode 100644 index 00000000..f852c7f7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s @@ -0,0 +1,112 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // @ MOVDDUP XMM0, 8[RSP] + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitary(alpha float64, x []float64) +TEXT ·ScalUnitary(SB), NOSPLIT, $0 + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ x_len+16(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + XORQ IDX, IDX // IDX = 0 + + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 + +loop: // do { // x[i] *= alpha unrolled 8x. + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + MOVUPS X3, 16(X_PTR)(IDX*8) + MOVUPS X4, 32(X_PTR)(IDX*8) + MOVUPS X5, 48(X_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if n == 0 goto end + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + // x[i] *= alpha for the remaining element. + MOVSD (X_PTR)(IDX*8), X2 + MULSD ALPHA, X2 + MOVSD X2, (X_PTR)(IDX*8) + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s new file mode 100644 index 00000000..d2b607f5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x2024 // @ MOVDDUP 32(SP), X0 /*XMM0, 32[RSP]*/ + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitaryTo(dst []float64, alpha float64, x []float64) +// This function assumes len(dst) >= len(x). +TEXT ·ScalUnitaryTo(SB), NOSPLIT, $0 + MOVQ x_base+32(FP), X_PTR // X_PTR = &x + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_len+40(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + +loop: // do { // dst[i] = alpha * x[i] unrolled 8x. 
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop counters + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail_one } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X_i + +end: + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go new file mode 100644 index 00000000..f6cf96ca --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go @@ -0,0 +1,165 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f64 + +// L1Norm is +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) + +// L1NormInc is +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) + +// AddConst is +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) + +// Add is +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// CumSum is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 + +// CumProd is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 + +// Div is +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) + +// DivTo is +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, x, y []float64) []float64 + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum 
+func DotUnitary(x, y []float64) (sum float64) + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) + +// L1Dist is +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 + +// LinfDist is +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float64, x []float64) + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float64, alpha float64, x []float64) + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float64, x []float64, n, incX uintptr) + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go new file mode 100644 index 00000000..eae620b1 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go @@ -0,0 +1,157 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
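+
+// Editorial note: the build constraint below is a disjunction, so these pure
+// Go fallbacks compile when the target is not amd64 or when any of the
+// noasm, appengine, or safe tags is set, for example:
+//  go build -tags safe ./...
+// The assembly-backed declarations in stubs_amd64.go carry the complementary
+// conjunction !noasm,!appengine,!safe.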
+ +// +build !amd64 noasm appengine safe + +package f64 + +import "math" + +// L1Norm is +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) { + for _, v := range x { + sum += math.Abs(v) + } + return sum +} + +// L1NormInc is +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) { + for i := 0; i < n*incX; i += incX { + sum += math.Abs(x[i]) + } + return sum +} + +// Add is +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) { + for i, v := range s { + dst[i] += v + } +} + +// AddConst is +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) { + for i := range x { + x[i] += alpha + } +} + +// CumSum is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] + v + } + return dst +} + +// CumProd is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] * v + } + return dst +} + +// Div is +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) { + for i, v := range s { + dst[i] /= v + } +} + +// DivTo is +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, s, t []float64) []float64 { + for i, v := range s { + dst[i] = v / t[i] + } + return dst +} + +// L1Dist is +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 { + var norm float64 + for i, v := range s { + norm += math.Abs(t[i] - v) + } + return norm +} + +// LinfDist is +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 { + var norm float64 + if len(s) == 0 { + return 0 + } + norm = math.Abs(t[0] - s[0]) + for i, v := range s[1:] { + absDiff := math.Abs(t[i+1] - v) + if absDiff > norm || math.IsNaN(norm) { + norm = absDiff + } + } + return norm +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/doc.go new file mode 100644 index 00000000..f3ab2235 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/doc.go @@ -0,0 +1,7 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package math32 provides float32 versions of standard library math package +// routines used by gonum/blas/native. 
+package math32 diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/math.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/math.go new file mode 100644 index 00000000..56c90be0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/math.go @@ -0,0 +1,111 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math32 + +import ( + "math" +) + +const ( + unan = 0x7fc00000 + uinf = 0x7f800000 + uneginf = 0xff800000 + mask = 0x7f8 >> 3 + shift = 32 - 8 - 1 + bias = 127 +) + +// Abs returns the absolute value of x. +// +// Special cases are: +// Abs(±Inf) = +Inf +// Abs(NaN) = NaN +func Abs(x float32) float32 { + switch { + case x < 0: + return -x + case x == 0: + return 0 // return correctly abs(-0) + } + return x +} + +// Copysign returns a value with the magnitude +// of x and the sign of y. +func Copysign(x, y float32) float32 { + const sign = 1 << 31 + return math.Float32frombits(math.Float32bits(x)&^sign | math.Float32bits(y)&sign) +} + +// Hypot returns Sqrt(p*p + q*q), taking care to avoid +// unnecessary overflow and underflow. +// +// Special cases are: +// Hypot(±Inf, q) = +Inf +// Hypot(p, ±Inf) = +Inf +// Hypot(NaN, q) = NaN +// Hypot(p, NaN) = NaN +func Hypot(p, q float32) float32 { + // special cases + switch { + case IsInf(p, 0) || IsInf(q, 0): + return Inf(1) + case IsNaN(p) || IsNaN(q): + return NaN() + } + if p < 0 { + p = -p + } + if q < 0 { + q = -q + } + if p < q { + p, q = q, p + } + if p == 0 { + return 0 + } + q = q / p + return p * Sqrt(1+q*q) +} + +// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0. +func Inf(sign int) float32 { + var v uint32 + if sign >= 0 { + v = uinf + } else { + v = uneginf + } + return math.Float32frombits(v) +} + +// IsInf reports whether f is an infinity, according to sign. +// If sign > 0, IsInf reports whether f is positive infinity. +// If sign < 0, IsInf reports whether f is negative infinity. +// If sign == 0, IsInf reports whether f is either infinity. +func IsInf(f float32, sign int) bool { + // Test for infinity by comparing against maximum float. + // To avoid the floating-point hardware, could use: + // x := math.Float32bits(f); + // return sign >= 0 && x == uinf || sign <= 0 && x == uneginf; + return sign >= 0 && f > math.MaxFloat32 || sign <= 0 && f < -math.MaxFloat32 +} + +// IsNaN reports whether f is an IEEE 754 ``not-a-number'' value. +func IsNaN(f float32) (is bool) { + // IEEE 754 says that only NaNs satisfy f != f. + // To avoid the floating-point hardware, could use: + // x := math.Float32bits(f); + // return uint32(x>>shift)&mask == mask && x != uinf && x != uneginf + return f != f +} + +// NaN returns an IEEE 754 ``not-a-number'' value. +func NaN() float32 { return math.Float32frombits(unan) } diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/signbit.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/signbit.go new file mode 100644 index 00000000..3e9f0bb4 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/signbit.go @@ -0,0 +1,16 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math32 + +import "math" + +// Signbit returns true if x is negative or negative zero. +func Signbit(x float32) bool { + return math.Float32bits(x)&(1<<31) != 0 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go new file mode 100644 index 00000000..bf630de9 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go @@ -0,0 +1,25 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package math32 + +import ( + "math" +) + +// Sqrt returns the square root of x. +// +// Special cases are: +// Sqrt(+Inf) = +Inf +// Sqrt(±0) = ±0 +// Sqrt(x < 0) = NaN +// Sqrt(NaN) = NaN +func Sqrt(x float32) float32 { + // FIXME(kortschak): Direct translation of the math package + // asm code for 386 fails to build. No test hardware is available + // for arm, so using conversion instead. + return float32(math.Sqrt(float64(x))) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go new file mode 100644 index 00000000..905ae5c6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package math32 + +// Sqrt returns the square root of x. +// +// Special cases are: +// Sqrt(+Inf) = +Inf +// Sqrt(±0) = ±0 +// Sqrt(x < 0) = NaN +// Sqrt(NaN) = NaN +func Sqrt(x float32) float32 diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s new file mode 100644 index 00000000..fa2b8696 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +// TODO(kortschak): use textflag.h after we drop Go 1.3 support +//#include "textflag.h" +// Don't insert stack check preamble. 
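+// The literal 4 defined below matches the NOSPLIT bit in the runtime's
+// textflag.h, so this manual define is equivalent to including that header.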
+#define NOSPLIT 4 + +// func Sqrt(x float32) float32 +TEXT ·Sqrt(SB),NOSPLIT,$0 + SQRTSS x+0(FP), X0 + MOVSS X0, ret+8(FP) + RET diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/.gitignore b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/README.md b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/README.md new file mode 100644 index 00000000..c355017c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/README.md @@ -0,0 +1,28 @@ +Gonum LAPACK [![GoDoc](https://godoc.org/gonum.org/v1/gonum/lapack?status.svg)](https://godoc.org/gonum.org/v1/gonum/lapack) +====== + +A collection of packages to provide LAPACK functionality for the Go programming +language (http://golang.org). This provides a partial implementation in native go +and a wrapper using cgo to a c-based implementation. + +## Installation + +``` + go get gonum.org/v1/gonum/lapack/... +``` + +## Packages + +### lapack + +Defines the LAPACK API based on http://www.netlib.org/lapack/lapacke.html + +### lapack/gonum + +Go implementation of the LAPACK API (incomplete, implements the `float64` API). + +### lapack/lapack64 + +Wrappers for an implementation of the double (i.e., `float64`) precision real parts of +the LAPACK API. + diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/doc.go new file mode 100644 index 00000000..da2759cd --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lapack provides interfaces for the LAPACK linear algebra standard. +package lapack diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go new file mode 100644 index 00000000..dd6e8b3a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go @@ -0,0 +1,488 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dbdsqr performs a singular value decomposition of a real n×n bidiagonal matrix. +// +// The SVD of the bidiagonal matrix B is +// B = Q * S * P^T +// where S is a diagonal matrix of singular values, Q is an orthogonal matrix of +// left singular vectors, and P is an orthogonal matrix of right singular vectors. +// +// Q and P are only computed if requested. If left singular vectors are requested, +// this routine returns U * Q instead of Q, and if right singular vectors are +// requested P^T * VT is returned instead of P^T. +// +// Frequently Dbdsqr is used in conjunction with Dgebrd which reduces a general +// matrix A into bidiagonal form. In this case, the SVD of A is +// A = (U * Q) * S * (P^T * VT) +// +// This routine may also compute Q^T * C. +// +// d and e contain the elements of the bidiagonal matrix b. d must have length at +// least n, and e must have length at least n-1. 
Dbdsqr will panic if there is +// insufficient length. On exit, D contains the singular values of B in decreasing +// order. +// +// VT is a matrix of size n×ncvt whose elements are stored in vt. The elements +// of vt are modified to contain P^T * VT on exit. VT is not used if ncvt == 0. +// +// U is a matrix of size nru×n whose elements are stored in u. The elements +// of u are modified to contain U * Q on exit. U is not used if nru == 0. +// +// C is a matrix of size n×ncc whose elements are stored in c. The elements +// of c are modified to contain Q^T * C on exit. C is not used if ncc == 0. +// +// work contains temporary storage and must have length at least 4*(n-1). Dbdsqr +// will panic if there is insufficient working memory. +// +// Dbdsqr returns whether the decomposition was successful. +// +// Dbdsqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dbdsqr(uplo blas.Uplo, n, ncvt, nru, ncc int, d, e, vt []float64, ldvt int, u []float64, ldu int, c []float64, ldc int, work []float64) (ok bool) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if ncvt != 0 { + checkMatrix(n, ncvt, vt, ldvt) + } + if nru != 0 { + checkMatrix(nru, n, u, ldu) + } + if ncc != 0 { + checkMatrix(n, ncc, c, ldc) + } + if len(d) < n { + panic(badD) + } + if len(e) < n-1 { + panic(badE) + } + if len(work) < 4*(n-1) { + panic(badWork) + } + var info int + bi := blas64.Implementation() + const ( + maxIter = 6 + ) + if n == 0 { + return true + } + if n != 1 { + // If the singular vectors do not need to be computed, use qd algorithm. + if !(ncvt > 0 || nru > 0 || ncc > 0) { + info = impl.Dlasq1(n, d, e, work) + // If info is 2 dqds didn't finish, and so try to. + if info != 2 { + return info == 0 + } + info = 0 + } + nm1 := n - 1 + nm12 := nm1 + nm1 + nm13 := nm12 + nm1 + idir := 0 + + eps := dlamchE + unfl := dlamchS + lower := uplo == blas.Lower + var cs, sn, r float64 + if lower { + for i := 0; i < n-1; i++ { + cs, sn, r = impl.Dlartg(d[i], e[i]) + d[i] = r + e[i] = sn * d[i+1] + d[i+1] *= cs + work[i] = cs + work[nm1+i] = sn + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, n, work, work[n-1:], u, ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, n, ncc, work, work[n-1:], c, ldc) + } + } + // Compute singular values to a relative accuracy of tol. If tol is negative + // the values will be computed to an absolute accuracy of math.Abs(tol) * norm(b) + tolmul := math.Max(10, math.Min(100, math.Pow(eps, -1.0/8))) + tol := tolmul * eps + var smax float64 + for i := 0; i < n; i++ { + smax = math.Max(smax, math.Abs(d[i])) + } + for i := 0; i < n-1; i++ { + smax = math.Max(smax, math.Abs(e[i])) + } + + var sminl float64 + var thresh float64 + if tol >= 0 { + sminoa := math.Abs(d[0]) + if sminoa != 0 { + mu := sminoa + for i := 1; i < n; i++ { + mu = math.Abs(d[i]) * (mu / (mu + math.Abs(e[i-1]))) + sminoa = math.Min(sminoa, mu) + if sminoa == 0 { + break + } + } + } + sminoa = sminoa / math.Sqrt(float64(n)) + thresh = math.Max(tol*sminoa, float64(maxIter*n*n)*unfl) + } else { + thresh = math.Max(math.Abs(tol)*smax, float64(maxIter*n*n)*unfl) + } + // Prepare for the main iteration loop for the singular values. + maxIt := maxIter * n * n + iter := 0 + oldl2 := -1 + oldm := -1 + // m points to the last element of unconverged part of matrix. 
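+	// The loop below is the implicit-QR driver: each pass locates the
+	// trailing unreduced diagonal block, deflates 2×2 blocks directly via
+	// Dlasv2, and otherwise applies one (shifted or zero-shift) QR sweep,
+	// until m reaches 1 or the iteration budget maxIt is exhausted.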
+ m := n + + Outer: + for m > 1 { + if iter > maxIt { + info = 0 + for i := 0; i < n-1; i++ { + if e[i] != 0 { + info++ + } + } + return info == 0 + } + // Find diagonal block of matrix to work on. + if tol < 0 && math.Abs(d[m-1]) <= thresh { + d[m-1] = 0 + } + smax = math.Abs(d[m-1]) + smin := smax + var l2 int + var broke bool + for l3 := 0; l3 < m-1; l3++ { + l2 = m - l3 - 2 + abss := math.Abs(d[l2]) + abse := math.Abs(e[l2]) + if tol < 0 && abss <= thresh { + d[l2] = 0 + } + if abse <= thresh { + broke = true + break + } + smin = math.Min(smin, abss) + smax = math.Max(math.Max(smax, abss), abse) + } + if broke { + e[l2] = 0 + if l2 == m-2 { + // Convergence of bottom singular value, return to top. + m-- + continue + } + l2++ + } else { + l2 = 0 + } + // e[ll] through e[m-2] are nonzero, e[ll-1] is zero + if l2 == m-2 { + // Handle 2×2 block separately. + var sinr, cosr, sinl, cosl float64 + d[m-1], d[m-2], sinr, cosr, sinl, cosl = impl.Dlasv2(d[m-2], e[m-2], d[m-1]) + e[m-2] = 0 + if ncvt > 0 { + bi.Drot(ncvt, vt[(m-2)*ldvt:], 1, vt[(m-1)*ldvt:], 1, cosr, sinr) + } + if nru > 0 { + bi.Drot(nru, u[m-2:], ldu, u[m-1:], ldu, cosl, sinl) + } + if ncc > 0 { + bi.Drot(ncc, c[(m-2)*ldc:], 1, c[(m-1)*ldc:], 1, cosl, sinl) + } + m -= 2 + continue + } + // If working on a new submatrix, choose shift direction from larger end + // diagonal element toward smaller. + if l2 > oldm-1 || m-1 < oldl2 { + if math.Abs(d[l2]) >= math.Abs(d[m-1]) { + idir = 1 + } else { + idir = 2 + } + } + // Apply convergence tests. + // TODO(btracey): There is a lot of similar looking code here. See + // if there is a better way to de-duplicate. + if idir == 1 { + // Run convergence test in forward direction. + // First apply standard test to bottom of matrix. + if math.Abs(e[m-2]) <= math.Abs(tol)*math.Abs(d[m-1]) || (tol < 0 && math.Abs(e[m-2]) <= thresh) { + e[m-2] = 0 + continue + } + if tol >= 0 { + // If relative accuracy desired, apply convergence criterion forward. + mu := math.Abs(d[l2]) + sminl = mu + for l3 := l2; l3 < m-1; l3++ { + if math.Abs(e[l3]) <= tol*mu { + e[l3] = 0 + continue Outer + } + mu = math.Abs(d[l3+1]) * (mu / (mu + math.Abs(e[l3]))) + sminl = math.Min(sminl, mu) + } + } + } else { + // Run convergence test in backward direction. + // First apply standard test to top of matrix. + if math.Abs(e[l2]) <= math.Abs(tol)*math.Abs(d[l2]) || (tol < 0 && math.Abs(e[l2]) <= thresh) { + e[l2] = 0 + continue + } + if tol >= 0 { + // If relative accuracy desired, apply convergence criterion backward. + mu := math.Abs(d[m-1]) + sminl = mu + for l3 := m - 2; l3 >= l2; l3-- { + if math.Abs(e[l3]) <= tol*mu { + e[l3] = 0 + continue Outer + } + mu = math.Abs(d[l3]) * (mu / (mu + math.Abs(e[l3]))) + sminl = math.Min(sminl, mu) + } + } + } + oldl2 = l2 + oldm = m + // Compute shift. First, test if shifting would ruin relative accuracy, + // and if so set the shift to zero. + var shift float64 + if tol >= 0 && float64(n)*tol*(sminl/smax) <= math.Max(eps, (1.0/100)*tol) { + shift = 0 + } else { + var sl2 float64 + if idir == 1 { + sl2 = math.Abs(d[l2]) + shift, _ = impl.Dlas2(d[m-2], e[m-2], d[m-1]) + } else { + sl2 = math.Abs(d[m-1]) + shift, _ = impl.Dlas2(d[l2], e[l2], d[l2+1]) + } + // Test if shift is negligible + if sl2 > 0 { + if (shift/sl2)*(shift/sl2) < eps { + shift = 0 + } + } + } + iter += m - l2 + 1 + // If no shift, do simplified QR iteration. 
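+			// The zero-shift case is the Demmel-Kahan sweep: it converges
+			// more slowly than the shifted sweep below, but it preserves
+			// the relative accuracy of small singular values.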
+			if shift == 0 {
+				if idir == 1 {
+					cs := 1.0
+					oldcs := 1.0
+					var sn, r, oldsn float64
+					for i := l2; i < m-1; i++ {
+						cs, sn, r = impl.Dlartg(d[i]*cs, e[i])
+						if i > l2 {
+							e[i-1] = oldsn * r
+						}
+						oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i+1]*sn)
+						work[i-l2] = cs
+						work[i-l2+nm1] = sn
+						work[i-l2+nm12] = oldcs
+						work[i-l2+nm13] = oldsn
+					}
+					h := d[m-1] * cs
+					d[m-1] = h * oldcs
+					e[m-2] = h * oldsn
+					if ncvt > 0 {
+						impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt)
+					}
+					if nru > 0 {
+						impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu)
+					}
+					if ncc > 0 {
+						impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc)
+					}
+					if math.Abs(e[m-2]) <= thresh {
+						e[m-2] = 0
+					}
+				} else {
+					cs := 1.0
+					oldcs := 1.0
+					var sn, r, oldsn float64
+					for i := m - 1; i >= l2+1; i-- {
+						cs, sn, r = impl.Dlartg(d[i]*cs, e[i-1])
+						if i < m-1 {
+							e[i] = oldsn * r
+						}
+						oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i-1]*sn)
+						work[i-l2-1] = cs
+						work[i-l2+nm1-1] = -sn
+						work[i-l2+nm12-1] = oldcs
+						work[i-l2+nm13-1] = -oldsn
+					}
+					h := d[l2] * cs
+					d[l2] = h * oldcs
+					e[l2] = h * oldsn
+					if ncvt > 0 {
+						impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt)
+					}
+					if nru > 0 {
+						impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu)
+					}
+					if ncc > 0 {
+						impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc)
+					}
+					if math.Abs(e[l2]) <= thresh {
+						e[l2] = 0
+					}
+				}
+			} else {
+				// Use nonzero shift.
+				if idir == 1 {
+					// Chase bulge from top to bottom. Save cosines and sines for
+					// later singular vector updates.
+					f := (math.Abs(d[l2]) - shift) * (math.Copysign(1, d[l2]) + shift/d[l2])
+					g := e[l2]
+					var cosl, sinl float64
+					for i := l2; i < m-1; i++ {
+						cosr, sinr, r := impl.Dlartg(f, g)
+						if i > l2 {
+							e[i-1] = r
+						}
+						f = cosr*d[i] + sinr*e[i]
+						e[i] = cosr*e[i] - sinr*d[i]
+						g = sinr * d[i+1]
+						d[i+1] *= cosr
+						cosl, sinl, r = impl.Dlartg(f, g)
+						d[i] = r
+						f = cosl*e[i] + sinl*d[i+1]
+						d[i+1] = cosl*d[i+1] - sinl*e[i]
+						if i < m-2 {
+							g = sinl * e[i+1]
+							e[i+1] = cosl * e[i+1]
+						}
+						work[i-l2] = cosr
+						work[i-l2+nm1] = sinr
+						work[i-l2+nm12] = cosl
+						work[i-l2+nm13] = sinl
+					}
+					e[m-2] = f
+					if ncvt > 0 {
+						impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt)
+					}
+					if nru > 0 {
+						impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu)
+					}
+					if ncc > 0 {
+						impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc)
+					}
+					if math.Abs(e[m-2]) <= thresh {
+						e[m-2] = 0
+					}
+				} else {
+					// Chase bulge from bottom to top. Save cosines and sines for
+					// later singular vector updates.
+ f := (math.Abs(d[m-1]) - shift) * (math.Copysign(1, d[m-1]) + shift/d[m-1]) + g := e[m-2] + for i := m - 1; i > l2; i-- { + cosr, sinr, r := impl.Dlartg(f, g) + if i < m-1 { + e[i] = r + } + f = cosr*d[i] + sinr*e[i-1] + e[i-1] = cosr*e[i-1] - sinr*d[i] + g = sinr * d[i-1] + d[i-1] *= cosr + cosl, sinl, r := impl.Dlartg(f, g) + d[i] = r + f = cosl*e[i-1] + sinl*d[i-1] + d[i-1] = cosl*d[i-1] - sinl*e[i-1] + if i > l2+1 { + g = sinl * e[i-2] + e[i-2] *= cosl + } + work[i-l2-1] = cosr + work[i-l2+nm1-1] = -sinr + work[i-l2+nm12-1] = cosl + work[i-l2+nm13-1] = -sinl + } + e[l2] = f + if math.Abs(e[l2]) <= thresh { + e[l2] = 0 + } + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc) + } + } + } + } + } + + // All singular values converged, make them positive. + for i := 0; i < n; i++ { + if d[i] < 0 { + d[i] *= -1 + if ncvt > 0 { + bi.Dscal(ncvt, -1, vt[i*ldvt:], 1) + } + } + } + + // Sort the singular values in decreasing order. + for i := 0; i < n-1; i++ { + isub := 0 + smin := d[0] + for j := 1; j < n-i; j++ { + if d[j] <= smin { + isub = j + smin = d[j] + } + } + if isub != n-i { + // Swap singular values and vectors. + d[isub] = d[n-i-1] + d[n-i-1] = smin + if ncvt > 0 { + bi.Dswap(ncvt, vt[isub*ldvt:], 1, vt[(n-i-1)*ldvt:], 1) + } + if nru > 0 { + bi.Dswap(nru, u[isub:], ldu, u[n-i-1:], ldu) + } + if ncc > 0 { + bi.Dswap(ncc, c[isub*ldc:], 1, c[(n-i-1)*ldc:], 1) + } + } + } + info = 0 + for i := 0; i < n-1; i++ { + if e[i] != 0 { + info++ + } + } + return info == 0 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go new file mode 100644 index 00000000..fa9f1eab --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go @@ -0,0 +1,78 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgebak updates an n×m matrix V as +// V = P D V, if side == lapack.RightEV, +// V = P D^{-1} V, if side == lapack.LeftEV, +// where P and D are n×n permutation and scaling matrices, respectively, +// implicitly represented by job, scale, ilo and ihi as returned by Dgebal. +// +// Typically, columns of the matrix V contain the right or left (determined by +// side) eigenvectors of the balanced matrix output by Dgebal, and Dgebak forms +// the eigenvectors of the original matrix. +// +// Dgebak is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebak(job lapack.Job, side lapack.EVSide, n, ilo, ihi int, scale []float64, m int, v []float64, ldv int) { + switch job { + default: + panic(badJob) + case lapack.None, lapack.Permute, lapack.Scale, lapack.PermuteScale: + } + switch side { + default: + panic(badEVSide) + case lapack.LeftEV, lapack.RightEV: + } + checkMatrix(n, m, v, ldv) + switch { + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + } + + // Quick return if possible. 
+ if n == 0 || m == 0 || job == lapack.None { + return + } + + bi := blas64.Implementation() + if ilo != ihi && job != lapack.Permute { + // Backward balance. + if side == lapack.RightEV { + for i := ilo; i <= ihi; i++ { + bi.Dscal(m, scale[i], v[i*ldv:], 1) + } + } else { + for i := ilo; i <= ihi; i++ { + bi.Dscal(m, 1/scale[i], v[i*ldv:], 1) + } + } + } + if job == lapack.Scale { + return + } + // Backward permutation. + for i := ilo - 1; i >= 0; i-- { + k := int(scale[i]) + if k == i { + continue + } + bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) + } + for i := ihi + 1; i < n; i++ { + k := int(scale[i]) + if k == i { + continue + } + bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go new file mode 100644 index 00000000..4af7eed2 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go @@ -0,0 +1,228 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgebal balances an n×n matrix A. Balancing consists of two stages, permuting +// and scaling. Both steps are optional and depend on the value of job. +// +// Permuting consists of applying a permutation matrix P such that the matrix +// that results from P^T*A*P takes the upper block triangular form +// [ T1 X Y ] +// P^T A P = [ 0 B Z ], +// [ 0 0 T2 ] +// where T1 and T2 are upper triangular matrices and B contains at least one +// nonzero off-diagonal element in each row and column. The indices ilo and ihi +// mark the starting and ending columns of the submatrix B. The eigenvalues of A +// isolated in the first 0 to ilo-1 and last ihi+1 to n-1 elements on the +// diagonal can be read off without any roundoff error. +// +// Scaling consists of applying a diagonal similarity transformation D such that +// D^{-1}*B*D has the 1-norm of each row and its corresponding column nearly +// equal. The output matrix is +// [ T1 X*D Y ] +// [ 0 inv(D)*B*D inv(D)*Z ]. +// [ 0 0 T2 ] +// Scaling may reduce the 1-norm of the matrix, and improve the accuracy of +// the computed eigenvalues and/or eigenvectors. +// +// job specifies the operations that will be performed on A. +// If job is lapack.None, Dgebal sets scale[i] = 1 for all i and returns ilo=0, ihi=n-1. +// If job is lapack.Permute, only permuting will be done. +// If job is lapack.Scale, only scaling will be done. +// If job is lapack.PermuteScale, both permuting and scaling will be done. +// +// On return, if job is lapack.Permute or lapack.PermuteScale, it will hold that +// A[i,j] == 0, for i > j and j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}. +// If job is lapack.None or lapack.Scale, or if n == 0, it will hold that +// ilo == 0 and ihi == n-1. +// +// On return, scale will contain information about the permutations and scaling +// factors applied to A. If π(j) denotes the index of the column interchanged +// with column j, and D[j,j] denotes the scaling factor applied to column j, +// then +// scale[j] == π(j), for j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}, +// == D[j,j], for j ∈ {ilo, ..., ihi}. +// scale must have length equal to n, otherwise Dgebal will panic. +// +// Dgebal is an internal routine. It is exported for testing purposes. 
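+//
+// A call sketch under minimal assumptions (every identifier below is the
+// caller's own, not part of this API):
+//	var impl Implementation
+//	n := 4
+//	a := make([]float64, n*n) // row-major n×n matrix, filled by the caller
+//	scale := make([]float64, n)
+//	ilo, ihi := impl.Dgebal(lapack.PermuteScale, n, a, n, scale)
+//	// Only the block a[ilo:ihi+1, ilo:ihi+1] still needs eigenvalue work.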
+func (impl Implementation) Dgebal(job lapack.Job, n int, a []float64, lda int, scale []float64) (ilo, ihi int) { + switch job { + default: + panic(badJob) + case lapack.None, lapack.Permute, lapack.Scale, lapack.PermuteScale: + } + checkMatrix(n, n, a, lda) + if len(scale) != n { + panic("lapack: bad length of scale") + } + + ilo = 0 + ihi = n - 1 + + if n == 0 || job == lapack.None { + for i := range scale { + scale[i] = 1 + } + return ilo, ihi + } + + bi := blas64.Implementation() + swapped := true + + if job == lapack.Scale { + goto scaling + } + + // Permutation to isolate eigenvalues if possible. + // + // Search for rows isolating an eigenvalue and push them down. + for swapped { + swapped = false + rows: + for i := ihi; i >= 0; i-- { + for j := 0; j <= ihi; j++ { + if i == j { + continue + } + if a[i*lda+j] != 0 { + continue rows + } + } + // Row i has only zero off-diagonal elements in the + // block A[ilo:ihi+1,ilo:ihi+1]. + scale[ihi] = float64(i) + if i != ihi { + bi.Dswap(ihi+1, a[i:], lda, a[ihi:], lda) + bi.Dswap(n, a[i*lda:], 1, a[ihi*lda:], 1) + } + if ihi == 0 { + scale[0] = 1 + return ilo, ihi + } + ihi-- + swapped = true + break + } + } + // Search for columns isolating an eigenvalue and push them left. + swapped = true + for swapped { + swapped = false + columns: + for j := ilo; j <= ihi; j++ { + for i := ilo; i <= ihi; i++ { + if i == j { + continue + } + if a[i*lda+j] != 0 { + continue columns + } + } + // Column j has only zero off-diagonal elements in the + // block A[ilo:ihi+1,ilo:ihi+1]. + scale[ilo] = float64(j) + if j != ilo { + bi.Dswap(ihi+1, a[j:], lda, a[ilo:], lda) + bi.Dswap(n-ilo, a[j*lda+ilo:], 1, a[ilo*lda+ilo:], 1) + } + swapped = true + ilo++ + break + } + } + +scaling: + for i := ilo; i <= ihi; i++ { + scale[i] = 1 + } + + if job == lapack.Permute { + return ilo, ihi + } + + // Balance the submatrix in rows ilo to ihi. + + const ( + // sclfac should be a power of 2 to avoid roundoff errors. + // Elements of scale are restricted to powers of sclfac, + // therefore the matrix will be only nearly balanced. + sclfac = 2 + // factor determines the minimum reduction of the row and column + // norms that is considered non-negligible. It must be less than 1. + factor = 0.95 + ) + sfmin1 := dlamchS / dlamchP + sfmax1 := 1 / sfmin1 + sfmin2 := sfmin1 * sclfac + sfmax2 := 1 / sfmin2 + + // Iterative loop for norm reduction. + var conv bool + for !conv { + conv = true + for i := ilo; i <= ihi; i++ { + c := bi.Dnrm2(ihi-ilo+1, a[ilo*lda+i:], lda) + r := bi.Dnrm2(ihi-ilo+1, a[i*lda+ilo:], 1) + ica := bi.Idamax(ihi+1, a[i:], lda) + ca := math.Abs(a[ica*lda+i]) + ira := bi.Idamax(n-ilo, a[i*lda+ilo:], 1) + ra := math.Abs(a[i*lda+ilo+ira]) + + // Guard against zero c or r due to underflow. + if c == 0 || r == 0 { + continue + } + g := r / sclfac + f := 1.0 + s := c + r + for c < g && math.Max(f, math.Max(c, ca)) < sfmax2 && math.Min(r, math.Min(g, ra)) > sfmin2 { + if math.IsNaN(c + f + ca + r + g + ra) { + // Panic if NaN to avoid infinite loop. + panic("lapack: NaN") + } + f *= sclfac + c *= sclfac + ca *= sclfac + g /= sclfac + r /= sclfac + ra /= sclfac + } + g = c / sclfac + for r <= g && math.Max(r, ra) < sfmax2 && math.Min(math.Min(f, c), math.Min(g, ca)) > sfmin2 { + f /= sclfac + c /= sclfac + ca /= sclfac + g /= sclfac + r *= sclfac + ra *= sclfac + } + + if c+r >= factor*s { + // Reduction would be negligible. 
+ continue + } + if f < 1 && scale[i] < 1 && f*scale[i] <= sfmin1 { + continue + } + if f > 1 && scale[i] > 1 && scale[i] >= sfmax1/f { + continue + } + + // Now balance. + scale[i] *= f + bi.Dscal(n-ilo, 1/f, a[i*lda+ilo:], 1) + bi.Dscal(ihi+1, f, a[i:], lda) + conv = false + } + } + return ilo, ihi +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go new file mode 100644 index 00000000..a8e4aacb --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go @@ -0,0 +1,74 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgebd2 reduces an m×n matrix A to upper or lower bidiagonal form by an orthogonal +// transformation. +// Q^T * A * P = B +// if m >= n, B is upper diagonal, otherwise B is lower bidiagonal. +// d is the diagonal, len = min(m,n) +// e is the off-diagonal len = min(m,n)-1 +// +// Dgebd2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebd2(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64) { + checkMatrix(m, n, a, lda) + if len(d) < min(m, n) { + panic(badD) + } + if len(e) < min(m, n)-1 { + panic(badE) + } + if len(tauQ) < min(m, n) { + panic(badTauQ) + } + if len(tauP) < min(m, n) { + panic(badTauP) + } + if len(work) < max(m, n) { + panic(badWork) + } + if m >= n { + for i := 0; i < n; i++ { + a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) + d[i] = a[i*lda+i] + a[i*lda+i] = 1 + // Apply H_i to A[i:m, i+1:n] from the left. + if i < n-1 { + impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tauQ[i], a[i*lda+i+1:], lda, work) + } + a[i*lda+i] = d[i] + if i < n-1 { + a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) + e[i] = a[i*lda+i+1] + a[i*lda+i+1] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i-1, a[i*lda+i+1:], 1, tauP[i], a[(i+1)*lda+i+1:], lda, work) + a[i*lda+i+1] = e[i] + } else { + tauP[i] = 0 + } + } + return + } + for i := 0; i < m; i++ { + a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + d[i] = a[i*lda+i] + a[i*lda+i] = 1 + if i < m-1 { + impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tauP[i], a[(i+1)*lda+i:], lda, work) + } + a[i*lda+i] = d[i] + if i < m-1 { + a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + a[(i+1)*lda+i] = 1 + impl.Dlarf(blas.Left, m-i-1, n-i-1, a[(i+1)*lda+i:], lda, tauQ[i], a[(i+1)*lda+i+1:], lda, work) + a[(i+1)*lda+i] = e[i] + } else { + tauQ[i] = 0 + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go new file mode 100644 index 00000000..794ac2c0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go @@ -0,0 +1,150 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgebrd reduces a general m×n matrix A to upper or lower bidiagonal form B by +// an orthogonal transformation: +// Q^T * A * P = B. 
+// The diagonal elements of B are stored in d and the off-diagonal elements are stored +// in e. These are additionally stored along the diagonal of A and the off-diagonal +// of A. If m >= n B is an upper-bidiagonal matrix, and if m < n B is a +// lower-bidiagonal matrix. +// +// The remaining elements of A store the data needed to construct Q and P. +// The matrices Q and P are products of elementary reflectors +// if m >= n, Q = H_0 * H_1 * ... * H_{n-1}, +// P = G_0 * G_1 * ... * G_{n-2}, +// if m < n, Q = H_0 * H_1 * ... * H_{m-2}, +// P = G_0 * G_1 * ... * G_{m-1}, +// where +// H_i = I - tauQ[i] * v_i * v_i^T, +// G_i = I - tauP[i] * u_i * u_i^T. +// +// As an example, on exit the entries of A when m = 6, and n = 5 +// [ d e u1 u1 u1] +// [v1 d e u2 u2] +// [v1 v2 d e u3] +// [v1 v2 v3 d e] +// [v1 v2 v3 v4 d] +// [v1 v2 v3 v4 v5] +// and when m = 5, n = 6 +// [ d u1 u1 u1 u1 u1] +// [ e d u2 u2 u2 u2] +// [v1 e d u3 u3 u3] +// [v1 v2 e d u4 u4] +// [v1 v2 v3 e d u5] +// +// d, tauQ, and tauP must all have length at least min(m,n), and e must have +// length min(m,n) - 1, unless lwork is -1 when there is no check except for +// work which must have a length of at least one. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(1,m,n) or be -1 and this function will panic otherwise. +// Dgebrd is blocked decomposition, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgebrd, +// the optimal work length will be stored into work[0]. +// +// Dgebrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebrd(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64, lwork int) { + checkMatrix(m, n, a, lda) + // Calculate optimal work. + nb := impl.Ilaenv(1, "DGEBRD", " ", m, n, -1, -1) + var lworkOpt int + if lwork == -1 { + if len(work) < 1 { + panic(badWork) + } + lworkOpt = ((m + n) * nb) + work[0] = float64(max(1, lworkOpt)) + return + } + minmn := min(m, n) + if len(d) < minmn { + panic(badD) + } + if len(e) < minmn-1 { + panic(badE) + } + if len(tauQ) < minmn { + panic(badTauQ) + } + if len(tauP) < minmn { + panic(badTauP) + } + ws := max(m, n) + if lwork < max(1, ws) { + panic(badWork) + } + if len(work) < lwork { + panic(badWork) + } + var nx int + if nb > 1 && nb < minmn { + nx = max(nb, impl.Ilaenv(3, "DGEBRD", " ", m, n, -1, -1)) + if nx < minmn { + ws = (m + n) * nb + if lwork < ws { + nbmin := impl.Ilaenv(2, "DGEBRD", " ", m, n, -1, -1) + if lwork >= (m+n)*nbmin { + nb = lwork / (m + n) + } else { + nb = minmn + nx = minmn + } + } + } + } else { + nx = minmn + } + bi := blas64.Implementation() + ldworkx := nb + ldworky := nb + var i int + // Netlib lapack has minmn - nx, but this makes the last nx rows (which by + // default is large) be unblocked. As written here, the blocking is more + // consistent. + for i = 0; i < minmn-nb; i += nb { + // Reduce rows and columns i:i+nb to bidiagonal form and return + // the matrices X and Y which are needed to update the unreduced + // part of the matrix. + // X is stored in the first m rows of work, y in the next rows. 
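+		// Here X is used as an m×nb matrix and Y as an n×nb matrix, which
+		// is why the blocked path needs ws = (m+n)*nb elements of work.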
+ x := work[:m*ldworkx] + y := work[m*ldworkx:] + impl.Dlabrd(m-i, n-i, nb, a[i*lda+i:], lda, + d[i:], e[i:], tauQ[i:], tauP[i:], + x, ldworkx, y, ldworky) + + // Update the trailing submatrix A[i+nb:m,i+nb:n], using an update + // of the form A := A - V*Y**T - X*U**T + bi.Dgemm(blas.NoTrans, blas.Trans, m-i-nb, n-i-nb, nb, + -1, a[(i+nb)*lda+i:], lda, y[nb*ldworky:], ldworky, + 1, a[(i+nb)*lda+i+nb:], lda) + + bi.Dgemm(blas.NoTrans, blas.NoTrans, m-i-nb, n-i-nb, nb, + -1, x[nb*ldworkx:], ldworkx, a[i*lda+i+nb:], lda, + 1, a[(i+nb)*lda+i+nb:], lda) + + // Copy diagonal and off-diagonal elements of B back into A. + if m >= n { + for j := i; j < i+nb; j++ { + a[j*lda+j] = d[j] + a[j*lda+j+1] = e[j] + } + } else { + for j := i; j < i+nb; j++ { + a[j*lda+j] = d[j] + a[(j+1)*lda+j] = e[j] + } + } + } + // Use unblocked code to reduce the remainder of the matrix. + impl.Dgebd2(m-i, n-i, a[i*lda+i:], lda, d[i:], e[i:], tauQ[i:], tauP[i:], work) + work[0] = float64(lworkOpt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go new file mode 100644 index 00000000..04e01535 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go @@ -0,0 +1,81 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgecon estimates the reciprocal of the condition number of the n×n matrix A +// given the LU decomposition of the matrix. The condition number computed may +// be based on the 1-norm or the ∞-norm. +// +// The slice a contains the result of the LU decomposition of A as computed by Dgetrf. +// +// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 4*n and Dgecon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dgecon will panic otherwise. 
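+//
+// Typical use pairs Dgecon with Dgetrf; a sketch, with every identifier
+// supplied by the caller:
+//	anorm := impl.Dlange(lapack.MaxColumnSum, n, n, a, lda, work)
+//	impl.Dgetrf(n, n, a, lda, ipiv) // overwrites a with its LU factors
+//	rcond := impl.Dgecon(lapack.MaxColumnSum, n, a, lda, anorm, work, iwork)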
+func (impl Implementation) Dgecon(norm lapack.MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { + checkMatrix(n, n, a, lda) + if norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum { + panic(badNorm) + } + if len(work) < 4*n { + panic(badWork) + } + if len(iwork) < n { + panic(badWork) + } + + if n == 0 { + return 1 + } else if anorm == 0 { + return 0 + } + + bi := blas64.Implementation() + var rcond, ainvnm float64 + var kase int + var normin bool + isave := new([3]int) + onenrm := norm == lapack.MaxColumnSum + smlnum := dlamchS + kase1 := 2 + if onenrm { + kase1 = 1 + } + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / ainvnm) / anorm + } + return rcond + } + var sl, su float64 + if kase == kase1 { + sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.Unit, normin, n, a, lda, work, work[2*n:]) + su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) + } else { + su = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) + sl = impl.Dlatrs(blas.Lower, blas.Trans, blas.Unit, normin, n, a, lda, work, work[2*n:]) + } + scale := sl * su + normin = true + if scale != 1 { + ix := bi.Idamax(n, work, 1) + if scale == 0 || scale < math.Abs(work[ix])*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go new file mode 100644 index 00000000..314074c9 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go @@ -0,0 +1,284 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgeev computes the eigenvalues and, optionally, the left and/or right +// eigenvectors for an n×n real nonsymmetric matrix A. +// +// The right eigenvector v_j of A corresponding to an eigenvalue λ_j +// is defined by +// A v_j = λ_j v_j, +// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by +// u_j^H A = λ_j u_j^H, +// where u_j^H is the conjugate transpose of u_j. +// +// On return, A will be overwritten and the left and right eigenvectors will be +// stored, respectively, in the columns of the n×n matrices VL and VR in the +// same order as their eigenvalues. If the j-th eigenvalue is real, then +// u_j = VL[:,j], +// v_j = VR[:,j], +// and if it is not real, then j and j+1 form a complex conjugate pair and the +// eigenvectors can be recovered as +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// where i is the imaginary unit. The computed eigenvectors are normalized to +// have Euclidean norm equal to 1 and largest component real. +// +// Left eigenvectors will be computed only if jobvl == lapack.ComputeLeftEV, +// otherwise jobvl must be lapack.None. Right eigenvectors will be computed +// only if jobvr == lapack.ComputeRightEV, otherwise jobvr must be lapack.None. +// For other values of jobvl and jobvr Dgeev will panic. +// +// wr and wi contain the real and imaginary parts, respectively, of the computed +// eigenvalues. 
Complex conjugate pairs of eigenvalues appear consecutively with
+// the eigenvalue having the positive imaginary part first.
+// wr and wi must have length n, and Dgeev will panic otherwise.
+//
+// work must have length at least lwork and lwork must be at least max(1,4*n) if
+// the left or right eigenvectors are computed, and at least max(1,3*n) if no
+// eigenvectors are computed. For good performance, lwork must generally be
+// larger. On return, the optimal value of lwork will be stored in work[0].
+//
+// If lwork == -1, instead of performing Dgeev, the function only calculates the
+// optimal value of lwork and stores it into work[0].
+//
+// On return, first is the index of the first valid eigenvalue. If first == 0,
+// all eigenvalues and eigenvectors have been computed. If first is positive,
+// Dgeev failed to compute all the eigenvalues, no eigenvectors have been
+// computed and wr[first:] and wi[first:] contain those eigenvalues which have
+// converged.
+func (impl Implementation) Dgeev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int) {
+	var wantvl bool
+	switch jobvl {
+	default:
+		panic("lapack: invalid LeftEVJob")
+	case lapack.ComputeLeftEV:
+		wantvl = true
+	case lapack.None:
+	}
+	var wantvr bool
+	switch jobvr {
+	default:
+		panic("lapack: invalid RightEVJob")
+	case lapack.ComputeRightEV:
+		wantvr = true
+	case lapack.None:
+	}
+	switch {
+	case n < 0:
+		panic(nLT0)
+	case len(work) < lwork:
+		panic(shortWork)
+	}
+	var minwrk int
+	if wantvl || wantvr {
+		minwrk = max(1, 4*n)
+	} else {
+		minwrk = max(1, 3*n)
+	}
+	if lwork != -1 {
+		checkMatrix(n, n, a, lda)
+		if wantvl {
+			checkMatrix(n, n, vl, ldvl)
+		}
+		if wantvr {
+			checkMatrix(n, n, vr, ldvr)
+		}
+		switch {
+		case len(wr) != n:
+			panic("lapack: bad length of wr")
+		case len(wi) != n:
+			panic("lapack: bad length of wi")
+		case lwork < minwrk:
+			panic(badWork)
+		}
+	}
+
+	// Quick return if possible.
+	if n == 0 {
+		work[0] = 1
+		return 0
+	}
+
+	maxwrk := 2*n + n*impl.Ilaenv(1, "DGEHRD", " ", n, 1, n, 0)
+	if wantvl || wantvr {
+		maxwrk = max(maxwrk, 2*n+(n-1)*impl.Ilaenv(1, "DORGHR", " ", n, 1, n, -1))
+		impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.OriginalEV, n, 0, n-1,
+			nil, 1, nil, nil, nil, 1, work, -1)
+		maxwrk = max(maxwrk, max(n+1, n+int(work[0])))
+		side := lapack.LeftEV
+		if wantvr {
+			side = lapack.RightEV
+		}
+		impl.Dtrevc3(side, lapack.AllEVMulQ, nil, n, nil, 1, nil, 1, nil, 1,
+			n, work, -1)
+		maxwrk = max(maxwrk, n+int(work[0]))
+		maxwrk = max(maxwrk, 4*n)
+	} else {
+		impl.Dhseqr(lapack.EigenvaluesOnly, lapack.None, n, 0, n-1,
+			nil, 1, nil, nil, nil, 1, work, -1)
+		maxwrk = max(maxwrk, max(n+1, n+int(work[0])))
+	}
+	maxwrk = max(maxwrk, minwrk)
+
+	if lwork == -1 {
+		work[0] = float64(maxwrk)
+		return 0
+	}
+
+	// Get machine constants.
+	smlnum := math.Sqrt(dlamchS) / dlamchP
+	bignum := 1 / smlnum
+
+	// Scale A if max element outside range [smlnum,bignum].
+	anrm := impl.Dlange(lapack.MaxAbs, n, n, a, lda, nil)
+	var scalea bool
+	var cscale float64
+	if 0 < anrm && anrm < smlnum {
+		scalea = true
+		cscale = smlnum
+	} else if anrm > bignum {
+		scalea = true
+		cscale = bignum
+	}
+	if scalea {
+		impl.Dlascl(lapack.General, 0, 0, anrm, cscale, n, n, a, lda)
+	}
+
+	// Balance the matrix.
+	workbal := work[:n]
+	ilo, ihi := impl.Dgebal(lapack.PermuteScale, n, a, lda, workbal)
+
+	// Reduce to upper Hessenberg form.
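+	// The workspace is partitioned as: work[:n] holds the balancing
+	// information (workbal), work[n:2*n-1] holds the Householder scalars
+	// tau, and work[iwrk:] is scratch space for the blocked routines.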
+ iwrk := 2 * n + tau := work[n : iwrk-1] + impl.Dgehrd(n, ilo, ihi, a, lda, tau, work[iwrk:], lwork-iwrk) + + var side lapack.EVSide + if wantvl { + side = lapack.LeftEV + // Copy Householder vectors to VL. + impl.Dlacpy(blas.Lower, n, n, a, lda, vl, ldvl) + // Generate orthogonal matrix in VL. + impl.Dorghr(n, ilo, ihi, vl, ldvl, tau, work[iwrk:], lwork-iwrk) + // Perform QR iteration, accumulating Schur vectors in VL. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.OriginalEV, n, ilo, ihi, + a, lda, wr, wi, vl, ldvl, work[iwrk:], lwork-iwrk) + if wantvr { + // Want left and right eigenvectors. + // Copy Schur vectors to VR. + side = lapack.RightLeftEV + impl.Dlacpy(blas.All, n, n, vl, ldvl, vr, ldvr) + } + } else if wantvr { + side = lapack.RightEV + // Copy Householder vectors to VR. + impl.Dlacpy(blas.Lower, n, n, a, lda, vr, ldvr) + // Generate orthogonal matrix in VR. + impl.Dorghr(n, ilo, ihi, vr, ldvr, tau, work[iwrk:], lwork-iwrk) + // Perform QR iteration, accumulating Schur vectors in VR. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.OriginalEV, n, ilo, ihi, + a, lda, wr, wi, vr, ldvr, work[iwrk:], lwork-iwrk) + } else { + // Compute eigenvalues only. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesOnly, lapack.None, n, ilo, ihi, + a, lda, wr, wi, nil, 1, work[iwrk:], lwork-iwrk) + } + + if first > 0 { + if scalea { + // Undo scaling. + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wr, 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wi, 1) + } + work[0] = float64(maxwrk) + return first + } + + if wantvl || wantvr { + // Compute left and/or right eigenvectors. + impl.Dtrevc3(side, lapack.AllEVMulQ, nil, n, + a, lda, vl, ldvl, vr, ldvr, n, work[iwrk:], lwork-iwrk) + } + bi := blas64.Implementation() + if wantvl { + // Undo balancing of left eigenvectors. + impl.Dgebak(lapack.PermuteScale, lapack.LeftEV, n, ilo, ihi, workbal, n, vl, ldvl) + // Normalize left eigenvectors and make largest component real. + for i, wii := range wi { + if wii < 0 { + continue + } + if wii == 0 { + scl := 1 / bi.Dnrm2(n, vl[i:], ldvl) + bi.Dscal(n, scl, vl[i:], ldvl) + continue + } + scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vl[i:], ldvl), bi.Dnrm2(n, vl[i+1:], ldvl)) + bi.Dscal(n, scl, vl[i:], ldvl) + bi.Dscal(n, scl, vl[i+1:], ldvl) + for k := 0; k < n; k++ { + vi := vl[k*ldvl+i] + vi1 := vl[k*ldvl+i+1] + work[iwrk+k] = vi*vi + vi1*vi1 + } + k := bi.Idamax(n, work[iwrk:iwrk+n], 1) + cs, sn, _ := impl.Dlartg(vl[k*ldvl+i], vl[k*ldvl+i+1]) + bi.Drot(n, vl[i:], ldvl, vl[i+1:], ldvl, cs, sn) + vl[k*ldvl+i+1] = 0 + } + } + if wantvr { + // Undo balancing of right eigenvectors. + impl.Dgebak(lapack.PermuteScale, lapack.RightEV, n, ilo, ihi, workbal, n, vr, ldvr) + // Normalize right eigenvectors and make largest component real. 
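+		// As for the left eigenvectors above: a conjugate pair occupies
+		// columns i and i+1, the Dlapy2 scaling gives the pair unit
+		// Euclidean norm, and the closing Drot rotates it so the largest
+		// component is purely real.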
+ for i, wii := range wi { + if wii < 0 { + continue + } + if wii == 0 { + scl := 1 / bi.Dnrm2(n, vr[i:], ldvr) + bi.Dscal(n, scl, vr[i:], ldvr) + continue + } + scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vr[i:], ldvr), bi.Dnrm2(n, vr[i+1:], ldvr)) + bi.Dscal(n, scl, vr[i:], ldvr) + bi.Dscal(n, scl, vr[i+1:], ldvr) + for k := 0; k < n; k++ { + vi := vr[k*ldvr+i] + vi1 := vr[k*ldvr+i+1] + work[iwrk+k] = vi*vi + vi1*vi1 + } + k := bi.Idamax(n, work[iwrk:iwrk+n], 1) + cs, sn, _ := impl.Dlartg(vr[k*ldvr+i], vr[k*ldvr+i+1]) + bi.Drot(n, vr[i:], ldvr, vr[i+1:], ldvr, cs, sn) + vr[k*ldvr+i+1] = 0 + } + } + + if scalea { + // Undo scaling. + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) + } + + work[0] = float64(maxwrk) + return first +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go new file mode 100644 index 00000000..36828378 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go @@ -0,0 +1,84 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgehd2 reduces a block of a general n×n matrix A to upper Hessenberg form H +// by an orthogonal similarity transformation Q^T * A * Q = H. +// +// The matrix Q is represented as a product of (ihi-ilo) elementary +// reflectors +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T +// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. +// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. +// +// On entry, a contains the n×n general matrix to be reduced. On return, the +// upper triangle and the first subdiagonal of A are overwritten with the upper +// Hessenberg matrix H, and the elements below the first subdiagonal, with the +// slice tau, represent the orthogonal matrix Q as a product of elementary +// reflectors. +// +// The contents of A are illustrated by the following example, with n = 7, ilo = +// 1 and ihi = 5. +// On entry, +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// on return, +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// ilo and ihi determine the block of A that will be reduced to upper Hessenberg +// form. It must hold that 0 <= ilo <= ihi <= max(0, n-1), otherwise Dgehd2 will +// panic. +// +// On return, tau will contain the scalar factors of the elementary reflectors. +// It must have length equal to n-1, otherwise Dgehd2 will panic. +// +// work must have length at least n, otherwise Dgehd2 will panic. +// +// Dgehd2 is an internal routine. It is exported for testing purposes. 
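+//
+// Dgehd2 is the unblocked form of the reduction; the blocked driver
+// Dgehrd in this package falls back to it for small or trailing blocks.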
+func (impl Implementation) Dgehd2(n, ilo, ihi int, a []float64, lda int, tau, work []float64) { + checkMatrix(n, n, a, lda) + switch { + case ilo < 0 || ilo > max(0, n-1): + panic(badIlo) + case ihi < min(ilo, n-1) || ihi >= n: + panic(badIhi) + case len(tau) != n-1: + panic(badTau) + case len(work) < n: + panic(badWork) + } + + for i := ilo; i < ihi; i++ { + // Compute elementary reflector H_i to annihilate A[i+2:ihi+1,i]. + var aii float64 + aii, tau[i] = impl.Dlarfg(ihi-i, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + a[(i+1)*lda+i] = 1 + + // Apply H_i to A[0:ihi+1,i+1:ihi+1] from the right. + impl.Dlarf(blas.Right, ihi+1, ihi-i, a[(i+1)*lda+i:], lda, tau[i], a[i+1:], lda, work) + + // Apply H_i to A[i+1:ihi+1,i+1:n] from the left. + impl.Dlarf(blas.Left, ihi-i, n-i-1, a[(i+1)*lda+i:], lda, tau[i], a[(i+1)*lda+i+1:], lda, work) + a[(i+1)*lda+i] = aii + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go new file mode 100644 index 00000000..027747d1 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go @@ -0,0 +1,183 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgehrd reduces a block of a real n×n general matrix A to upper Hessenberg +// form H by an orthogonal similarity transformation Q^T * A * Q = H. +// +// The matrix Q is represented as a product of (ihi-ilo) elementary +// reflectors +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T +// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. +// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. +// +// On entry, a contains the n×n general matrix to be reduced. On return, the +// upper triangle and the first subdiagonal of A will be overwritten with the +// upper Hessenberg matrix H, and the elements below the first subdiagonal, with +// the slice tau, represent the orthogonal matrix Q as a product of elementary +// reflectors. +// +// The contents of a are illustrated by the following example, with n = 7, ilo = +// 1 and ihi = 5. +// On entry, +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// on return, +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// ilo and ihi determine the block of A that will be reduced to upper Hessenberg +// form. It must hold that 0 <= ilo <= ihi < n if n > 0, and ilo == 0 and ihi == +// -1 if n == 0, otherwise Dgehrd will panic. +// +// On return, tau will contain the scalar factors of the elementary reflectors. +// Elements tau[:ilo] and tau[ihi:] will be set to zero. tau must have length +// equal to n-1 if n > 0, otherwise Dgehrd will panic. +// +// work must have length at least lwork and lwork must be at least max(1,n), +// otherwise Dgehrd will panic. On return, work[0] contains the optimal value of +// lwork. 
+// +// If lwork == -1, instead of performing Dgehrd, only the optimal value of lwork +// will be stored in work[0]. +// +// Dgehrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgehrd(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lwork < max(1, n) && lwork != -1: + panic(badWork) + case len(work) < lwork: + panic(shortWork) + } + if lwork != -1 { + checkMatrix(n, n, a, lda) + if len(tau) != n-1 && n > 0 { + panic(badTau) + } + } + + const ( + nbmax = 64 + ldt = nbmax + 1 + tsize = ldt * nbmax + ) + // Compute the workspace requirements. + nb := min(nbmax, impl.Ilaenv(1, "DGEHRD", " ", n, ilo, ihi, -1)) + lwkopt := n*nb + tsize + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + // Set tau[:ilo] and tau[ihi:] to zero. + for i := 0; i < ilo; i++ { + tau[i] = 0 + } + for i := ihi; i < n-1; i++ { + tau[i] = 0 + } + + // Quick return if possible. + nh := ihi - ilo + 1 + if nh <= 1 { + work[0] = 1 + return + } + + // Determine the block size. + nbmin := 2 + var nx int + if 1 < nb && nb < nh { + // Determine when to cross over from blocked to unblocked code + // (last block is always handled by unblocked code). + nx = max(nb, impl.Ilaenv(3, "DGEHRD", " ", n, ilo, ihi, -1)) + if nx < nh { + // Determine if workspace is large enough for blocked code. + if lwork < n*nb+tsize { + // Not enough workspace to use optimal nb: + // determine the minimum value of nb, and reduce + // nb or force use of unblocked code. + nbmin = max(2, impl.Ilaenv(2, "DGEHRD", " ", n, ilo, ihi, -1)) + if lwork >= n*nbmin+tsize { + nb = (lwork - tsize) / n + } else { + nb = 1 + } + } + } + } + ldwork := nb // work is used as an n×nb matrix. + + var i int + if nb < nbmin || nh <= nb { + // Use unblocked code below. + i = ilo + } else { + // Use blocked code. + bi := blas64.Implementation() + iwt := n * nb // Size of the matrix Y and index where the matrix T starts in work. + for i = ilo; i < ihi-nx; i += nb { + ib := min(nb, ihi-i) + + // Reduce columns [i:i+ib] to Hessenberg form, returning the + // matrices V and T of the block reflector H = I - V*T*V^T + // which performs the reduction, and also the matrix Y = A*V*T. + impl.Dlahr2(ihi+1, i+1, ib, a[i:], lda, tau[i:], work[iwt:], ldt, work, ldwork) + + // Apply the block reflector H to A[:ihi+1,i+ib:ihi+1] from the + // right, computing A := A - Y * V^T. V[i+ib,i+ib-1] must be set + // to 1. + ei := a[(i+ib)*lda+i+ib-1] + a[(i+ib)*lda+i+ib-1] = 1 + bi.Dgemm(blas.NoTrans, blas.Trans, ihi+1, ihi-i-ib+1, ib, + -1, work, ldwork, + a[(i+ib)*lda+i:], lda, + 1, a[i+ib:], lda) + a[(i+ib)*lda+i+ib-1] = ei + + // Apply the block reflector H to A[0:i+1,i+1:i+ib-1] from the + // right. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, i+1, ib-1, + 1, a[(i+1)*lda+i:], lda, work, ldwork) + for j := 0; j <= ib-2; j++ { + bi.Daxpy(i+1, -1, work[j:], ldwork, a[i+j+1:], lda) + } + + // Apply the block reflector H to A[i+1:ihi+1,i+ib:n] from the + // left. + impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, + ihi-i, n-i-ib, ib, + a[(i+1)*lda+i:], lda, work[iwt:], ldt, a[(i+1)*lda+i+ib:], lda, work, ldwork) + } + } + // Use unblocked code to reduce the rest of the matrix. 
+ impl.Dgehd2(n, i, ihi, a, lda, tau, work) + work[0] = float64(lwkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go new file mode 100644 index 00000000..05b3ce45 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go @@ -0,0 +1,49 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgelq2 computes the LQ factorization of the m×n matrix A. +// +// In an LQ factorization, L is a lower triangular m×n matrix, and Q is an n×n +// orthonormal matrix. +// +// a is modified to contain the information to construct L and Q. +// The lower triangle of a contains the matrix L. The upper triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length of at least k = min(m,n) +// and this function will panic otherwise. +// +// See Dgeqr2 for a description of the elementary reflectors and orthonormal +// matrix Q. Q is constructed as a product of these elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0. +// +// work is temporary storage of length at least m and this function will panic otherwise. +// +// Dgelq2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgelq2(m, n int, a []float64, lda int, tau, work []float64) { + checkMatrix(m, n, a, lda) + k := min(m, n) + if len(tau) < k { + panic(badTau) + } + if len(work) < m { + panic(badWork) + } + for i := 0; i < k; i++ { + a[i*lda+i], tau[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + if i < m-1 { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i, + a[i*lda+i:], 1, + tau[i], + a[(i+1)*lda+i:], lda, + work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go new file mode 100644 index 00000000..12913911 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go @@ -0,0 +1,84 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgelqf computes the LQ factorization of the m×n matrix A using a blocked +// algorithm. See the documentation for Dgelq2 for a description of the +// parameters at entry and exit. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m, and this function will panic otherwise. +// Dgelqf is a blocked LQ factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgelqf, +// the optimal work length will be stored into work[0]. +// +// tau must have length at least min(m,n), and this function will panic otherwise. 
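+//
+// The usual two-call workspace idiom applies; a sketch with caller-side
+// names only:
+//	work := make([]float64, 1)
+//	impl.Dgelqf(m, n, a, lda, tau, work, -1) // query optimal lwork into work[0]
+//	work = make([]float64, int(work[0]))
+//	impl.Dgelqf(m, n, a, lda, tau, work, len(work))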
+func (impl Implementation) Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) {
+	nb := impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1)
+	lworkopt := m * max(nb, 1)
+	if lwork == -1 {
+		work[0] = float64(lworkopt)
+		return
+	}
+	checkMatrix(m, n, a, lda)
+	if len(work) < lwork {
+		panic(shortWork)
+	}
+	if lwork < m {
+		panic(badWork)
+	}
+	k := min(m, n)
+	if len(tau) < k {
+		panic(badTau)
+	}
+	if k == 0 {
+		return
+	}
+	// Find the optimal blocking size based on the size of available memory
+	// and optimal machine parameters.
+	nbmin := 2
+	var nx int
+	iws := m
+	ldwork := nb
+	if nb > 1 && k > nb {
+		nx = max(0, impl.Ilaenv(3, "DGELQF", " ", m, n, -1, -1))
+		if nx < k {
+			iws = m * nb
+			if lwork < iws {
+				nb = lwork / m
+				nbmin = max(2, impl.Ilaenv(2, "DGELQF", " ", m, n, -1, -1))
+			}
+		}
+	}
+	// Compute the blocked LQ factorization.
+	var i int
+	if nb >= nbmin && nb < k && nx < k {
+		for i = 0; i < k-nx; i += nb {
+			ib := min(k-i, nb)
+			impl.Dgelq2(ib, n-i, a[i*lda+i:], lda, tau[i:], work)
+			if i+ib < m {
+				impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib,
+					a[i*lda+i:], lda,
+					tau[i:],
+					work, ldwork)
+				impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Forward, lapack.RowWise,
+					m-i-ib, n-i, ib,
+					a[i*lda+i:], lda,
+					work, ldwork,
+					a[(i+ib)*lda+i:], lda,
+					work[ib*ldwork:], ldwork)
+			}
+		}
+	}
+	// Perform unblocked LQ factorization on the remainder.
+	if i < k {
+		impl.Dgelq2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work)
+	}
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go
new file mode 100644
index 00000000..214b9663
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go
@@ -0,0 +1,200 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"gonum.org/v1/gonum/blas"
+	"gonum.org/v1/gonum/lapack"
+)
+
+// Dgels finds a minimum-norm solution based on the matrices A and B using the
+// QR or LQ factorization. Dgels returns false if the matrix
+// A is singular, and true if this solution was successfully found.
+//
+// The minimization problem solved depends on the input parameters.
+//
+// 1. If m >= n and trans == blas.NoTrans, Dgels finds X such that || A*X - B||_2
+// is minimized.
+// 2. If m < n and trans == blas.NoTrans, Dgels finds the minimum norm solution of
+// A * X = B.
+// 3. If m >= n and trans == blas.Trans, Dgels finds the minimum norm solution of
+// A^T * X = B.
+// 4. If m < n and trans == blas.Trans, Dgels finds X such that || A^T*X - B||_2
+// is minimized.
+// Note that the least-squares solutions (cases 1 and 4) perform the minimization
+// per column of B. This is not the same as finding the minimum-norm matrix.
+//
+// The matrix A is a general matrix of size m×n and is modified during this call.
+// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry,
+// the elements of b specify the input matrix B. B has size m×nrhs if
+// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the
+// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans,
+// this submatrix is of size n×nrhs, and of size m×nrhs otherwise.
+//
+// work is temporary storage, and lwork specifies the usable memory length.
+// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic
+// otherwise. A longer work will enable blocked algorithms to be called.
+// In the special case that lwork == -1, work[0] will be set to the optimal working
+// length.
+func (impl Implementation) Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool {
+	notran := trans == blas.NoTrans
+	checkMatrix(m, n, a, lda)
+	mn := min(m, n)
+	checkMatrix(max(m, n), nrhs, b, ldb)
+
+	// Find optimal block size.
+	tpsd := true
+	if notran {
+		tpsd = false
+	}
+	var nb int
+	if m >= n {
+		nb = impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1)
+		if tpsd {
+			nb = max(nb, impl.Ilaenv(1, "DORMQR", "LN", m, nrhs, n, -1))
+		} else {
+			nb = max(nb, impl.Ilaenv(1, "DORMQR", "LT", m, nrhs, n, -1))
+		}
+	} else {
+		nb = impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1)
+		if tpsd {
+			nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LT", n, nrhs, m, -1))
+		} else {
+			nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LN", n, nrhs, m, -1))
+		}
+	}
+	if lwork == -1 {
+		work[0] = float64(max(1, mn+max(mn, nrhs)*nb))
+		return true
+	}
+
+	if len(work) < lwork {
+		panic(shortWork)
+	}
+	if lwork < mn+max(mn, nrhs) {
+		panic(badWork)
+	}
+	if m == 0 || n == 0 || nrhs == 0 {
+		impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb)
+		return true
+	}
+
+	// Scale the input matrices if they contain extreme values.
+	smlnum := dlamchS / dlamchP
+	bignum := 1 / smlnum
+	anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil)
+	var iascl int
+	if anrm > 0 && anrm < smlnum {
+		impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda)
+		iascl = 1
+	} else if anrm > bignum {
+		impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda)
+		iascl = 2
+	} else if anrm == 0 {
+		// Matrix is all zeros.
+		impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb)
+		return true
+	}
+	brow := m
+	if tpsd {
+		brow = n
+	}
+	bnrm := impl.Dlange(lapack.MaxAbs, brow, nrhs, b, ldb, nil)
+	ibscl := 0
+	if bnrm > 0 && bnrm < smlnum {
+		impl.Dlascl(lapack.General, 0, 0, bnrm, smlnum, brow, nrhs, b, ldb)
+		ibscl = 1
+	} else if bnrm > bignum {
+		impl.Dlascl(lapack.General, 0, 0, bnrm, bignum, brow, nrhs, b, ldb)
+		ibscl = 2
+	}
+
+	// Solve the minimization problem using a QR or an LQ decomposition.
+	var scllen int
+	if m >= n {
+		impl.Dgeqrf(m, n, a, lda, work, work[mn:], lwork-mn)
+		if !tpsd {
+			impl.Dormqr(blas.Left, blas.Trans, m, nrhs, n,
+				a, lda,
+				work[:n],
+				b, ldb,
+				work[mn:], lwork-mn)
+			ok := impl.Dtrtrs(blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs,
+				a, lda,
+				b, ldb)
+			if !ok {
+				return false
+			}
+			scllen = n
+		} else {
+			ok := impl.Dtrtrs(blas.Upper, blas.Trans, blas.NonUnit, n, nrhs,
+				a, lda,
+				b, ldb)
+			if !ok {
+				return false
+			}
+			for i := n; i < m; i++ {
+				for j := 0; j < nrhs; j++ {
+					b[i*ldb+j] = 0
+				}
+			}
+			impl.Dormqr(blas.Left, blas.NoTrans, m, nrhs, n,
+				a, lda,
+				work[:n],
+				b, ldb,
+				work[mn:], lwork-mn)
+			scllen = m
+		}
+	} else {
+		impl.Dgelqf(m, n, a, lda, work, work[mn:], lwork-mn)
+		if !tpsd {
+			ok := impl.Dtrtrs(blas.Lower, blas.NoTrans, blas.NonUnit,
+				m, nrhs,
+				a, lda,
+				b, ldb)
+			if !ok {
+				return false
+			}
+			for i := m; i < n; i++ {
+				for j := 0; j < nrhs; j++ {
+					b[i*ldb+j] = 0
+				}
+			}
+			impl.Dormlq(blas.Left, blas.Trans, n, nrhs, m,
+				a, lda,
+				work,
+				b, ldb,
+				work[mn:], lwork-mn)
+			scllen = n
+		} else {
+			impl.Dormlq(blas.Left, blas.NoTrans, n, nrhs, m,
+				a, lda,
+				work,
+				b, ldb,
+				work[mn:], lwork-mn)
+			ok := impl.Dtrtrs(blas.Lower, blas.Trans, blas.NonUnit,
+				m, nrhs,
+				a, lda,
+				b, ldb)
+			if !ok {
+				return false
+			}
+			scllen = m
+		}
+	}
+
+	// Adjust answer vector based on scaling.
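+	// (Scaling A by a factor c scales the computed solution by 1/c, so
+	// applying the same A-scaling to b below undoes it; the B-scalings
+	// scale the solution directly and are therefore inverted.)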
+ if iascl == 1 { + impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, scllen, nrhs, b, ldb) + } + if iascl == 2 { + impl.Dlascl(lapack.General, 0, 0, anrm, bignum, scllen, nrhs, b, ldb) + } + if ibscl == 1 { + impl.Dlascl(lapack.General, 0, 0, smlnum, bnrm, scllen, nrhs, b, ldb) + } + if ibscl == 2 { + impl.Dlascl(lapack.General, 0, 0, bignum, bnrm, scllen, nrhs, b, ldb) + } + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go new file mode 100644 index 00000000..6d9b7413 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go @@ -0,0 +1,45 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgeql2 computes the QL factorization of the m×n matrix A. That is, Dgeql2 +// computes Q and L such that +// A = Q * L +// where Q is an m×m orthonormal matrix and L is a lower trapezoidal matrix. +// +// Q is represented as a product of elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0 +// where k = min(m,n) and each H_i has the form +// H_i = I - tau[i] * v_i * v_i^T +// Vector v_i has v[m-k+i+1:m] = 0, v[m-k+i] = 1, and v[:m-k+i+1] is stored on +// exit in A[0:m-k+i-1, n-k+i]. +// +// tau must have length at least min(m,n), and Dgeql2 will panic otherwise. +// +// work is temporary memory storage and must have length at least n. +// +// Dgeql2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeql2(m, n int, a []float64, lda int, tau, work []float64) { + checkMatrix(m, n, a, lda) + if len(tau) < min(m, n) { + panic(badTau) + } + if len(work) < n { + panic(badWork) + } + k := min(m, n) + var aii float64 + for i := k - 1; i >= 0; i-- { + // Generate elementary reflector H_i to annihilate A[0:m-k+i-1, n-k+i]. + aii, tau[i] = impl.Dlarfg(m-k+i+1, a[(m-k+i)*lda+n-k+i], a[n-k+i:], lda) + + // Apply H_i to A[0:m-k+i, 0:n-k+i-1] from the left. + a[(m-k+i)*lda+n-k+i] = 1 + impl.Dlarf(blas.Left, m-k+i+1, n-k+i, a[n-k+i:], lda, tau[i], a, lda, work) + a[(m-k+i)*lda+n-k+i] = aii + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go new file mode 100644 index 00000000..ef71b3ad --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go @@ -0,0 +1,174 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgeqp3 computes a QR factorization with column pivoting of the +// m×n matrix A: A*P = Q*R using Level 3 BLAS. +// +// The matrix Q is represented as a product of elementary reflectors +// Q = H_0 H_1 . . . H_{k-1}, where k = min(m,n). +// Each H_i has the form +// H_i = I - tau * v * v^T +// where tau and v are real vectors with v[0:i-1] = 0 and v[i] = 1; +// v[i:m] is stored on exit in A[i:m, i], and tau in tau[i]. +// +// jpvt specifies a column pivot to be applied to A. If +// jpvt[j] is at least zero, the jth column of A is permuted +// to the front of A*P (a leading column), if jpvt[j] is -1 +// the jth column of A is a free column. 
If jpvt[j] < -1, Dgeqp3 +// will panic. On return, jpvt holds the permutation that was +// applied; the jth column of A*P was the jpvt[j] column of A. +// jpvt must have length n or Dgeqp3 will panic. +// +// tau holds the scalar factors of the elementary reflectors. +// It must have length min(m, n), otherwise Dgeqp3 will panic. +// +// work must have length at least max(1,lwork), and lwork must be at least +// 3*n+1, otherwise Dgeqp3 will panic. For optimal performance lwork must +// be at least 2*n+(n+1)*nb, where nb is the optimal blocksize. On return, +// work[0] will contain the optimal value of lwork. +// +// If lwork == -1, instead of performing Dgeqp3, only the optimal value of lwork +// will be stored in work[0]. +// +// Dgeqp3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeqp3(m, n int, a []float64, lda int, jpvt []int, tau, work []float64, lwork int) { + const ( + inb = 1 + inbmin = 2 + ixover = 3 + ) + checkMatrix(m, n, a, lda) + + if len(jpvt) != n { + panic(badIpiv) + } + for _, v := range jpvt { + if v < -1 || n <= v { + panic("lapack: jpvt element out of range") + } + } + minmn := min(m, n) + if len(work) < max(1, lwork) { + panic(badWork) + } + + var iws, lwkopt, nb int + if minmn == 0 { + iws = 1 + lwkopt = 1 + } else { + iws = 3*n + 1 + nb = impl.Ilaenv(inb, "DGEQRF", " ", m, n, -1, -1) + lwkopt = 2*n + (n+1)*nb + } + work[0] = float64(lwkopt) + + if lwork == -1 { + return + } + + if len(tau) < minmn { + panic(badTau) + } + + bi := blas64.Implementation() + + // Move initial columns up front. + var nfxd int + for j := 0; j < n; j++ { + if jpvt[j] == -1 { + jpvt[j] = j + continue + } + if j != nfxd { + bi.Dswap(m, a[j:], lda, a[nfxd:], lda) + jpvt[j], jpvt[nfxd] = jpvt[nfxd], j + } else { + jpvt[j] = j + } + nfxd++ + } + + // Factorize nfxd columns. + // + // Compute the QR factorization of nfxd columns and update remaining columns. + if nfxd > 0 { + na := min(m, nfxd) + impl.Dgeqrf(m, na, a, lda, tau, work, lwork) + iws = max(iws, int(work[0])) + if na < n { + impl.Dormqr(blas.Left, blas.Trans, m, n-na, na, a, lda, tau[:na], a[na:], lda, + work, lwork) + iws = max(iws, int(work[0])) + } + } + + if nfxd >= minmn { + work[0] = float64(iws) + return + } + + // Factorize free columns. + sm := m - nfxd + sn := n - nfxd + sminmn := minmn - nfxd + + // Determine the block size. + nb = impl.Ilaenv(inb, "DGEQRF", " ", sm, sn, -1, -1) + nbmin := 2 + nx := 0 + + if 1 < nb && nb < sminmn { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(ixover, "DGEQRF", " ", sm, sn, -1, -1)) + + if nx < sminmn { + // Determine if workspace is large enough for blocked code. + minws := 2*sn + (sn+1)*nb + iws = max(iws, minws) + if lwork < minws { + // Not enough workspace to use optimal nb. Reduce + // nb and determine the minimum value of nb. + nb = (lwork - 2*sn) / (sn + 1) + nbmin = max(2, impl.Ilaenv(inbmin, "DGEQRF", " ", sm, sn, -1, -1)) + } + } + } + + // Initialize partial column norms. + // The first n elements of work store the exact column norms. + for j := nfxd; j < n; j++ { + work[j] = bi.Dnrm2(sm, a[nfxd*lda+j:], lda) + work[n+j] = work[j] + } + j := nfxd + if nbmin <= nb && nb < sminmn && nx < sminmn { + // Use blocked code initially. + + // Compute factorization. + var fjb int + for topbmn := minmn - nx; j < topbmn; j += fjb { + jb := min(nb, topbmn-j) + + // Factorize jb columns among columns j:n. 
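+			// (Dlaqps may bail out and factorize fewer than jb columns when a
+			// partial column norm becomes unreliable; the number of columns it
+			// actually factorized is returned in fjb and advances the loop.)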
+ fjb = impl.Dlaqps(m, n-j, j, jb, a[j:], lda, jpvt[j:], tau[j:], + work[j:n], work[j+n:2*n], work[2*n:2*n+jb], work[2*n+jb:], jb) + } + } + + // Use unblocked code to factor the last or only block. + if j < minmn { + impl.Dlaqp2(m, n-j, j, a[j:], lda, jpvt[j:], tau[j:], + work[j:n], work[j+n:2*n], work[2*n:]) + } + + work[0] = float64(iws) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go new file mode 100644 index 00000000..05df4265 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go @@ -0,0 +1,59 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgeqr2 computes a QR factorization of the m×n matrix A. +// +// In a QR factorization, Q is an m×m orthonormal matrix, and R is an +// upper triangular m×n matrix. +// +// A is modified to contain the information to construct Q and R. +// The upper triangle of a contains the matrix R. The lower triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length at least min(m,n), and +// this function will panic otherwise. +// +// The ith elementary reflector can be explicitly constructed by first extracting +// the +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// and computing H_i = I - tau[i] * v * v^T. +// +// The orthonormal matrix Q can be constructed from a product of these elementary +// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). +// +// work is temporary storage of length at least n and this function will panic otherwise. +// +// Dgeqr2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeqr2(m, n int, a []float64, lda int, tau, work []float64) { + // TODO(btracey): This is oriented such that columns of a are eliminated. + // This likely could be re-arranged to take better advantage of row-major + // storage. + checkMatrix(m, n, a, lda) + if len(work) < n { + panic(badWork) + } + k := min(m, n) + if len(tau) < k { + panic(badTau) + } + for i := 0; i < k; i++ { + // Generate elementary reflector H_i. + a[i*lda+i], tau[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min((i+1), m-1)*lda+i:], lda) + if i < n-1 { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(blas.Left, m-i, n-i-1, + a[i*lda+i:], lda, + tau[i], + a[i*lda+i+1:], lda, + work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go new file mode 100644 index 00000000..a8a8155a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go @@ -0,0 +1,98 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgeqrf computes the QR factorization of the m×n matrix A using a blocked +// algorithm. See the documentation for Dgeqr2 for a description of the +// parameters at entry and exit. +// +// work is temporary storage, and lwork specifies the usable memory length. 
+// The length of work must be at least max(1, lwork) and lwork must be -1 +// or at least n, otherwise this function will panic. +// Dgeqrf is a blocked QR factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgeqrf, +// the optimal work length will be stored into work[0]. +// +// tau must have length at least min(m,n), and this function will panic otherwise. +func (impl Implementation) Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { + if len(work) < max(1, lwork) { + panic(shortWork) + } + // nb is the optimal blocksize, i.e. the number of columns transformed at a time. + nb := impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) + lworkopt := max(1, n*nb) + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + checkMatrix(m, n, a, lda) + if lwork < n { + panic(badWork) + } + k := min(m, n) + if len(tau) < k { + panic(badTau) + } + if k == 0 { + work[0] = 1 + return + } + nbmin := 2 // Minimal block size. + var nx int // Use unblocked (unless changed in the next for loop) + iws := n + ldwork := nb + // Only consider blocked if the suggested block size is > 1 and the + // number of rows or columns is sufficiently large. + if 1 < nb && nb < k { + // nx is the block size at which the code switches from blocked + // to unblocked. + nx = max(0, impl.Ilaenv(3, "DGEQRF", " ", m, n, -1, -1)) + if k > nx { + iws = ldwork * n + if lwork < iws { + // Not enough workspace to use the optimal block + // size. Get the minimum block size instead. + nb = lwork / n + nbmin = max(2, impl.Ilaenv(2, "DGEQRF", " ", m, n, -1, -1)) + } + } + } + for i := range work { + work[i] = 0 + } + // Compute QR using a blocked algorithm. + var i int + if nbmin <= nb && nb < k && nx < k { + for i = 0; i < k-nx; i += nb { + ib := min(k-i, nb) + // Compute the QR factorization of the current block. + impl.Dgeqr2(m-i, ib, a[i*lda+i:], lda, tau[i:], work) + if i+ib < n { + // Form the triangular factor of the block reflector and apply H^T + // In Dlarft, work becomes the T matrix. + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, + m-i, n-i-ib, ib, + a[i*lda+i:], lda, + work, ldwork, + a[i*lda+i+ib:], lda, + work[ib*ldwork:], ldwork) + } + } + } + // Call unblocked code on the remaining columns. + if i < k { + impl.Dgeqr2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) + } + work[0] = float64(lworkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go new file mode 100644 index 00000000..52ac2cb8 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go @@ -0,0 +1,53 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgerq2 computes an RQ factorization of the m×n matrix A, +// A = R * Q. +// On exit, if m <= n, the upper triangle of the subarray +// A[0:m, n-m:n] contains the m×m upper triangular matrix R. +// If m >= n, the elements on and above the (m-n)-th subdiagonal +// contain the m×n upper trapezoidal matrix R. +// The remaining elements, with tau, represent the +// orthogonal matrix Q as a product of min(m,n) elementary +// reflectors. 
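+//
+// For example, for m = 2, n = 4 the factored form is stored in A as (a sketch)
+//  [ v0 v0 r00 r01 ]
+//  [ v1 v1 v1  r11 ]
+// where the r elements belong to R and the v elements in each row are the
+// stored parts of the corresponding reflector vector.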
+//
+// The matrix Q is represented as a product of elementary reflectors
+//  Q = H_0 H_1 . . . H_{min(m,n)-1}.
+// Each H_i has the form
+//  H_i = I - tau_i * v * v^T
+// where v is a vector with v[0:n-k+i] stored in A[m-k+i, 0:n-k+i],
+// v[n-k+i+1:n] = 0 and v[n-k+i] = 1.
+//
+// tau must have length min(m,n) and work must have length m, otherwise
+// Dgerq2 will panic.
+//
+// Dgerq2 is an internal routine. It is exported for testing purposes.
+func (impl Implementation) Dgerq2(m, n int, a []float64, lda int, tau, work []float64) {
+	checkMatrix(m, n, a, lda)
+	k := min(m, n)
+	if len(tau) < k {
+		panic(badTau)
+	}
+	if len(work) < m {
+		panic(badWork)
+	}
+
+	for i := k - 1; i >= 0; i-- {
+		// Generate elementary reflector H[i] to annihilate
+		// A[m-k+i, 0:n-k+i].
+		mki := m - k + i
+		nki := n - k + i
+		var aii float64
+		aii, tau[i] = impl.Dlarfg(nki+1, a[mki*lda+nki], a[mki*lda:], 1)
+
+		// Apply H[i] to A[0:m-k+i, 0:n-k+i+1] from the right.
+		a[mki*lda+nki] = 1
+		impl.Dlarf(blas.Right, mki, nki+1, a[mki*lda:], 1, tau[i], a, lda, work)
+		a[mki*lda+nki] = aii
+	}
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go
new file mode 100644
index 00000000..7ecdf55f
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go
@@ -0,0 +1,128 @@
+// Copyright ©2017 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"gonum.org/v1/gonum/blas"
+	"gonum.org/v1/gonum/lapack"
+)
+
+// Dgerqf computes an RQ factorization of the m×n matrix A,
+//  A = R * Q.
+// On exit, if m <= n, the upper triangle of the subarray
+// A[0:m, n-m:n] contains the m×m upper triangular matrix R.
+// If m >= n, the elements on and above the (m-n)-th subdiagonal
+// contain the m×n upper trapezoidal matrix R.
+// The remaining elements, with tau, represent the
+// orthogonal matrix Q as a product of min(m,n) elementary
+// reflectors.
+//
+// The matrix Q is represented as a product of elementary reflectors
+//  Q = H_0 H_1 . . . H_{min(m,n)-1}.
+// Each H_i has the form
+//  H_i = I - tau_i * v * v^T
+// where v is a vector with v[0:n-k+i] stored in A[m-k+i, 0:n-k+i],
+// v[n-k+i+1:n] = 0 and v[n-k+i] = 1.
+//
+// tau must have length min(m,n), work must have length max(1, lwork),
+// and lwork must be -1 or at least max(1, m), otherwise Dgerqf will panic.
+// On exit, work[0] will contain the optimal length for work.
+//
+// Dgerqf is an internal routine. It is exported for testing purposes.
+func (impl Implementation) Dgerqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) {
+	checkMatrix(m, n, a, lda)
+
+	if len(work) < max(1, lwork) {
+		panic(shortWork)
+	}
+	if lwork != -1 && lwork < max(1, m) {
+		panic(badWork)
+	}
+
+	k := min(m, n)
+	if len(tau) != k {
+		panic(badTau)
+	}
+
+	var nb, lwkopt int
+	if k == 0 {
+		lwkopt = 1
+	} else {
+		nb = impl.Ilaenv(1, "DGERQF", " ", m, n, -1, -1)
+		lwkopt = m * nb
+	}
+	work[0] = float64(lwkopt)
+
+	if lwork == -1 {
+		return
+	}
+
+	// Return quickly if possible.
+	if k == 0 {
+		return
+	}
+
+	nbmin := 2
+	nx := 1
+	iws := m
+	var ldwork int
+	if 1 < nb && nb < k {
+		// Determine when to cross over from blocked to unblocked code.
+		nx = max(0, impl.Ilaenv(3, "DGERQF", " ", m, n, -1, -1))
+		if nx < k {
+			// Determine whether workspace is large enough for blocked code.
+ iws = m * nb + if lwork < iws { + // Not enough workspace to use optimal nb. Reduce + // nb and determine the minimum value of nb. + nb = lwork / m + nbmin = max(2, impl.Ilaenv(2, "DGERQF", " ", m, n, -1, -1)) + } + ldwork = nb + } + } + + var mu, nu int + if nbmin <= nb && nb < k && nx < k { + // Use blocked code initially. + // The last kk rows are handled by the block method. + ki := ((k - nx - 1) / nb) * nb + kk := min(k, ki+nb) + + var i int + for i = k - kk + ki; i >= k-kk; i -= nb { + ib := min(k-i, nb) + + // Compute the RQ factorization of the current block + // A[m-k+i:m-k+i+ib-1, 0:n-k+i+ib-1]. + impl.Dgerq2(ib, n-k+i+ib, a[(m-k+i)*lda:], lda, tau[i:], work) + if m-k+i > 0 { + // Form the triangular factor of the block reflector + // H = H_{i+ib-1} . . . H_{i+1} H_i. + impl.Dlarft(lapack.Backward, lapack.RowWise, + n-k+i+ib, ib, a[(m-k+i)*lda:], lda, tau[i:], + work, ldwork) + + // Apply H to A[0:m-k+i-1, 0:n-k+i+ib-1] from the right. + impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Backward, lapack.RowWise, + m-k+i, n-k+i+ib, ib, a[(m-k+i)*lda:], lda, + work, ldwork, + a, lda, + work[ib*ldwork:], ldwork) + } + } + mu = m - k + i + nb + nu = n - k + i + nb + } else { + mu = m + nu = n + } + + // Use unblocked code to factor the last or only block. + if mu > 0 && nu > 0 { + impl.Dgerq2(mu, nu, a, lda, tau, work) + } + work[0] = float64(iws) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go new file mode 100644 index 00000000..70f6db89 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go @@ -0,0 +1,1356 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +const noSVDO = "dgesvd: not coded for overwrite" + +// Dgesvd computes the singular value decomposition of the input matrix A. +// +// The singular value decomposition is +// A = U * Sigma * V^T +// where Sigma is an m×n diagonal matrix containing the singular values of A, +// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first +// min(m,n) columns of U and V are the left and right singular vectors of A +// respectively. +// +// jobU and jobVT are options for computing the singular vectors. The behavior +// is as follows +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDInPlace The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// The behavior is the same for jobVT and the rows of V^T. At most one of jobU +// and jobVT can equal lapack.SVDOverwrite, and Dgesvd will panic otherwise. +// +// On entry, a contains the data for the m×n matrix A. During the call to Dgesvd +// the data is overwritten. On exit, A contains the appropriate singular vectors +// if either job is lapack.SVDOverwrite. +// +// s is a slice of length at least min(m,n) and on exit contains the singular +// values in decreasing order. +// +// u contains the left singular vectors on exit, stored column-wise. If +// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDInPlace u is +// of size m×min(m,n). 
If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is
+// not used.
+//
+// vt contains the right singular vectors on exit, stored row-wise. If
+// jobVT == lapack.SVDAll, vt is of size n×n. If jobVT == lapack.SVDInPlace vt is
+// of size min(m,n)×n. If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is
+// not used.
+//
+// work is a slice for storing temporary memory, and lwork is the usable size of
+// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)).
+// If lwork == -1, instead of performing Dgesvd, the optimal work length will be
+// stored into work[0]. Dgesvd will panic if the working memory has insufficient
+// storage.
+//
+// Dgesvd returns whether the decomposition successfully completed.
+func (impl Implementation) Dgesvd(jobU, jobVT lapack.SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool) {
+	minmn := min(m, n)
+	checkMatrix(m, n, a, lda)
+	if jobU == lapack.SVDAll {
+		checkMatrix(m, m, u, ldu)
+	} else if jobU == lapack.SVDInPlace {
+		checkMatrix(m, minmn, u, ldu)
+	}
+	if jobVT == lapack.SVDAll {
+		checkMatrix(n, n, vt, ldvt)
+	} else if jobVT == lapack.SVDInPlace {
+		checkMatrix(minmn, n, vt, ldvt)
+	}
+	if jobU == lapack.SVDOverwrite && jobVT == lapack.SVDOverwrite {
+		panic("lapack: both jobU and jobVT are lapack.SVDOverwrite")
+	}
+	if len(s) < minmn {
+		panic(badS)
+	}
+	if jobU == lapack.SVDOverwrite || jobVT == lapack.SVDOverwrite {
+		panic(noSVDO)
+	}
+	if m == 0 || n == 0 {
+		return true
+	}
+
+	wantua := jobU == lapack.SVDAll
+	wantus := jobU == lapack.SVDInPlace
+	wantuas := wantua || wantus
+	wantuo := jobU == lapack.SVDOverwrite
+	wantun := jobU == lapack.SVDNone
+
+	wantva := jobVT == lapack.SVDAll
+	wantvs := jobVT == lapack.SVDInPlace
+	wantvas := wantva || wantvs
+	wantvo := jobVT == lapack.SVDOverwrite
+	wantvn := jobVT == lapack.SVDNone
+
+	bi := blas64.Implementation()
+	var mnthr int
+
+	// Compute optimal space for subroutines.
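+	// Each routine used below is first called with lwork = -1; in that mode
+	// it only reports its optimal workspace length in work[0], and those
+	// lengths are folded into the per-path estimates of maxwrk.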
+ maxwrk := 1 + opts := string(jobU) + string(jobVT) + var wrkbl, bdspac int + if m >= n { + mnthr = impl.Ilaenv(6, "DGESVD", opts, m, n, 0, 0) + bdspac = 5 * n + impl.Dgeqrf(m, n, a, lda, nil, work, -1) + lwork_dgeqrf := int(work[0]) + impl.Dorgqr(m, n, n, a, lda, nil, work, -1) + lwork_dorgqr_n := int(work[0]) + impl.Dorgqr(m, m, n, a, lda, nil, work, -1) + lwork_dorgqr_m := int(work[0]) + impl.Dgebrd(n, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + impl.Dorgbr(lapack.ApplyP, n, n, n, a, lda, nil, work, -1) + lwork_dorgbr_p := int(work[0]) + impl.Dorgbr(lapack.ApplyQ, n, n, n, a, lda, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + + if m >= mnthr { + // m >> n + if wantun { + // Path 1 + maxwrk = n + lwork_dgeqrf + maxwrk = max(maxwrk, 3*n+lwork_dgebrd) + if wantvo || wantvas { + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) + } + maxwrk = max(maxwrk, bdspac) + } else if wantuo && wantvn { + // Path 2 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(n*n+wrkbl, n*n+m*n+n) + } else if wantuo && wantvs { + // Path 3 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(n*n+wrkbl, n*n+m*n+n) + } else if wantus && wantvn { + // Path 4 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantus && wantvo { + // Path 5 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*n*n + wrkbl + } else if wantus && wantvas { + // Path 6 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantua && wantvn { + // Path 7 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantua && wantvo { + // Path 8 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*n*n + wrkbl + } else if wantua && wantvas { + // Path 9 + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } + } else { + // Path 10: m > n + impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + maxwrk = 3*n + lwork_dgebrd + if wantus || wantuo { + impl.Dorgbr(lapack.ApplyQ, m, n, n, a, lda, nil, work, -1) + lwork_dorgbr_q = int(work[0]) + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) + } + if wantua { + impl.Dorgbr(lapack.ApplyQ, m, m, n, a, lda, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) + } + if !wantvn { + 
maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) + } + maxwrk = max(maxwrk, bdspac) + } + } else { + mnthr = impl.Ilaenv(6, "DGESVD", opts, m, n, 0, 0) + + bdspac = 5 * m + impl.Dgelqf(m, n, a, lda, nil, work, -1) + lwork_dgelqf := int(work[0]) + impl.Dorglq(n, n, m, nil, n, nil, work, -1) + lwork_dorglq_n := int(work[0]) + impl.Dorglq(m, n, m, a, lda, nil, work, -1) + lwork_dorglq_m := int(work[0]) + impl.Dgebrd(m, m, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + impl.Dorgbr(lapack.ApplyP, m, m, m, a, n, nil, work, -1) + lwork_dorgbr_p := int(work[0]) + impl.Dorgbr(lapack.ApplyQ, m, m, m, a, n, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + if n >= mnthr { + // n >> m + if wantvn { + // Path 1t + maxwrk = m + lwork_dgelqf + maxwrk = max(maxwrk, 3*m+lwork_dgebrd) + if wantuo || wantuas { + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q) + } + maxwrk = max(maxwrk, bdspac) + } else if wantvo && wantun { + // Path 2t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(m*m+wrkbl, m*m+m*n+m) + } else if wantvo && wantuas { + // Path 3t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(m*m+wrkbl, m*m+m*n+m) + } else if wantvs && wantun { + // Path 4t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantvs && wantuo { + // Path 5t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*m*m + wrkbl + } else if wantvs && wantuas { + // Path 6t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantva && wantun { + // Path 7t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantva && wantuo { + // Path 8t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*m*m + wrkbl + } else if wantva && wantuas { + // Path 9t + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } + } else { + // Path 10t, n > m + impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd = int(work[0]) + maxwrk = 3*m + lwork_dgebrd + if wantvs || wantvo { + impl.Dorgbr(lapack.ApplyP, m, n, m, a, n, nil, work, -1) + lwork_dorgbr_p = int(work[0]) + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) + } + if wantva { + impl.Dorgbr(lapack.ApplyP, n, n, m, a, n, nil, work, -1) + lwork_dorgbr_p = int(work[0]) + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) + } + if !wantun { + 
maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q)
+			}
+			maxwrk = max(maxwrk, bdspac)
+		}
+	}
+
+	minWork := max(1, 5*minmn)
+	if !((wantun && m >= mnthr) || (wantvn && n >= mnthr)) {
+		minWork = max(minWork, 3*minmn+max(m, n))
+	}
+
+	if lwork != -1 {
+		if len(work) < lwork {
+			panic(shortWork)
+		}
+		if lwork < minWork {
+			panic(badWork)
+		}
+	}
+	if m == 0 || n == 0 {
+		return true
+	}
+
+	maxwrk = max(maxwrk, minWork)
+	work[0] = float64(maxwrk)
+	if lwork == -1 {
+		return true
+	}
+
+	// Perform decomposition.
+	eps := dlamchE
+	smlnum := math.Sqrt(dlamchS) / eps
+	bignum := 1 / smlnum
+
+	// Scale A if max element outside range [smlnum, bignum].
+	anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil)
+	var iscl bool
+	if anrm > 0 && anrm < smlnum {
+		iscl = true
+		impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda)
+	} else if anrm > bignum {
+		iscl = true
+		impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda)
+	}
+
+	var ie int
+	if m >= n {
+		// If A has sufficiently more rows than columns, use the QR decomposition.
+		if m >= mnthr {
+			// m >> n
+			if wantun {
+				// Path 1.
+				itau := 0
+				iwork := itau + n
+
+				// Compute A = Q * R.
+				impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+
+				// Zero out below R.
+				impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda)
+				ie = 0
+				itauq := ie + n
+				itaup := itauq + n
+				iwork = itaup + n
+				// Bidiagonalize R in A.
+				impl.Dgebrd(n, n, a, lda, s, work[ie:], work[itauq:],
+					work[itaup:], work[iwork:], lwork-iwork)
+				ncvt := 0
+				if wantvo || wantvas {
+					// Generate P^T.
+					impl.Dorgbr(lapack.ApplyP, n, n, n, a, lda, work[itaup:],
+						work[iwork:], lwork-iwork)
+					ncvt = n
+				}
+				iwork = ie + n
+
+				// Perform bidiagonal QR iteration computing right singular vectors
+				// of A in A if desired.
+				ok = impl.Dbdsqr(blas.Upper, n, ncvt, 0, 0, s, work[ie:],
+					a, lda, work, 1, work, 1, work[iwork:])
+
+				// If right singular vectors desired in VT, copy them there.
+				if wantvas {
+					impl.Dlacpy(blas.All, n, n, a, lda, vt, ldvt)
+				}
+			} else if wantuo && wantvn {
+				// Path 2
+				panic(noSVDO)
+			} else if wantuo && wantvas {
+				// Path 3
+				panic(noSVDO)
+			} else if wantus {
+				if wantvn {
+					// Path 4
+					if lwork >= n*n+max(4*n, bdspac) {
+						// Sufficient workspace for a fast algorithm.
+						ir := 0
+						var ldworkr int
+						if lwork >= wrkbl+lda*n {
+							ldworkr = lda
+						} else {
+							ldworkr = n
+						}
+						itau := ir + ldworkr*n
+						iwork := itau + n
+						// Compute A = Q * R.
+						impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+
+						// Copy R to work[ir:], zeroing out below it.
+						impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr)
+						impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr)
+
+						// Generate Q in A.
+						impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+						ie := itau
+						itauq := ie + n
+						itaup := itauq + n
+						iwork = itaup + n
+
+						// Bidiagonalize R in work[ir:].
+						impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:],
+							work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
+
+						// Generate left vectors bidiagonalizing R in work[ir:].
+						impl.Dorgbr(lapack.ApplyQ, n, n, n, work[ir:], ldworkr,
+							work[itauq:], work[iwork:], lwork-iwork)
+						iwork = ie + n
+
+						// Perform bidiagonal QR iteration, computing left singular
+						// vectors of R in work[ir:].
+						ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1,
+							work[ir:], ldworkr, work, 1, work[iwork:])
+
+						// Multiply Q in A by left singular vectors of R in
+						// work[ir:], storing result in U.
+ bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda, + work[ir:], ldworkr, 0, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Zero out below R in A. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left vectors bidiagonalizing R. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left + // singular vectors of A in U. + ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], work, 1, + u, ldu, work, 1, work[iwork:]) + } + } else if wantvo { + // Path 5 + panic(noSVDO) + } else if wantvas { + // Path 6 + if lwork >= n*n+max(4*n, bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + var ldworku int + if lwork >= wrkbl+lda*n { + ldworku = lda + } else { + ldworku = n + } + itau := iu + ldworku*n + iwork := itau + n + + // Compute A = Q * R. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + // Copy R to work[iu:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku) + + // Generate Q in A. + impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[iu:], copying result to VT. + impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt) + + // Generate left bidiagonalizing vectors in work[iu:]. + impl.Dorgbr(lapack.ApplyQ, n, n, n, work[iu:], ldworku, + work[itauq:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.ApplyP, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[iu:], and computing right singular + // vectors of R in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:], + vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:]) + + // Multiply Q in A by left singular vectors of R in + // work[iu:], storing result in U. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda, + work[iu:], ldworku, 0, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q * R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + + // Copy R to VT, zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt) + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in VT. 
+ impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in VT. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.ApplyP, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } else if wantua { + if wantvn { + // Path 7 + if lwork >= n*n+max(max(n+m, 4*n), bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*n { + ldworkr = lda + } else { + ldworkr = n + } + itau := ir + ldworkr*n + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Copy R to work[ir:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[ir:]. + impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in work[ir:]. + impl.Dorgbr(lapack.ApplyQ, n, n, n, work[ir:], ldworkr, + work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1, + work[ir:], ldworkr, work, 1, work[iwork:]) + + // Multiply Q in U by left singular vectors of R in + // work[ir:], storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, u, ldu, + work[ir:], ldworkr, 0, a, lda) + + // Copy left singular vectors of A from A to U. + impl.Dlacpy(blas.All, m, n, a, lda, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Zero out below R in A. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in A. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left + // singular vectors of A in U. + ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], + work, 1, u, ldu, work, 1, work[iwork:]) + } + } else if wantvo { + // Path 8. + panic(noSVDO) + } else if wantvas { + // Path 9. + if lwork >= n*n+max(max(n+m, 4*n), bdspac) { + // Sufficient workspace for a fast algorithm. 
+						iu := 0
+						var ldworku int
+						if lwork >= wrkbl+lda*n {
+							ldworku = lda
+						} else {
+							ldworku = n
+						}
+						itau := iu + ldworku*n
+						iwork := itau + n
+
+						// Compute A = Q * R, copying result to U.
+						impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+						impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu)
+
+						// Generate Q in U.
+						impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork)
+
+						// Copy R to work[iu:], zeroing out below it.
+						impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku)
+						impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku)
+
+						ie = itau
+						itauq := ie + n
+						itaup := itauq + n
+						iwork = itaup + n
+
+						// Bidiagonalize R in work[iu:], copying result to VT.
+						impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:],
+							work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
+						impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt)
+
+						// Generate left bidiagonalizing vectors in work[iu:].
+						impl.Dorgbr(lapack.ApplyQ, n, n, n, work[iu:], ldworku,
+							work[itauq:], work[iwork:], lwork-iwork)
+
+						// Generate right bidiagonalizing vectors in VT.
+						impl.Dorgbr(lapack.ApplyP, n, n, n, vt, ldvt,
+							work[itaup:], work[iwork:], lwork-iwork)
+						iwork = ie + n
+
+						// Perform bidiagonal QR iteration, computing left singular
+						// vectors of R in work[iu:] and computing right
+						// singular vectors of R in VT.
+						ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:],
+							vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:])
+
+						// Multiply Q in U by left singular vectors of R in
+						// work[iu:], storing result in A.
+						bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1,
+							u, ldu, work[iu:], ldworku, 0, a, lda)
+
+						// Copy left singular vectors of A from A to U.
+						impl.Dlacpy(blas.All, m, n, a, lda, u, ldu)
+
+						/*
+							// Bidiagonalize R in VT.
+							impl.Dgebrd(n, n, vt, ldvt, s, work[ie:],
+								work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
+
+							// Multiply Q in U by left bidiagonalizing vectors in VT.
+							impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans,
+								m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork)
+
+							// Generate right bidiagonalizing vectors in VT.
+							impl.Dorgbr(lapack.ApplyP, n, n, n, vt, ldvt,
+								work[itaup:], work[iwork:], lwork-iwork)
+							iwork = ie + n
+
+							// Perform bidiagonal QR iteration, computing left singular
+							// vectors of A in U and computing right singular vectors
+							// of A in VT.
+							ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:],
+								vt, ldvt, u, ldu, work, 1, work[iwork:])
+						*/
+					} else {
+						// Insufficient workspace for a fast algorithm.
+						itau := 0
+						iwork := itau + n
+
+						// Compute A = Q*R, copying result to U.
+						impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+						impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu)
+
+						// Generate Q in U.
+						impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork)
+
+						// Copy R from A to VT, zeroing out below it.
+						impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt)
+						impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt)
+
+						ie := itau
+						itauq := ie + n
+						itaup := itauq + n
+						iwork = itaup + n
+
+						// Bidiagonalize R in VT.
+						impl.Dgebrd(n, n, vt, ldvt, s, work[ie:],
+							work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
+
+						// Multiply Q in U by left bidiagonalizing vectors in VT.
+						impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans,
+							m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork)
+
+						// Generate right bidiagonalizing vectors in VT.
+						impl.Dorgbr(lapack.ApplyP, n, n, n, vt, ldvt,
+							work[itaup:], work[iwork:], lwork-iwork)
+						iwork = ie + n
+
+						// Perform bidiagonal QR iteration, computing left singular
+						// vectors of A in U and computing right singular vectors
+						// of A in VT.
+						ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:],
+							vt, ldvt, u, ldu, work, 1, work[iwork:])
+					}
+				}
+			}
+		} else {
+			// Path 10.
+			// M at least N, but not much larger.
+			ie = 0
+			itauq := ie + n
+			itaup := itauq + n
+			iwork := itaup + n
+
+			// Bidiagonalize A.
+			impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:],
+				work[itaup:], work[iwork:], lwork-iwork)
+			if wantuas {
+				// Left singular vectors are desired in U. Copy result to U and
+				// generate left bidiagonalizing vectors in U.
+				impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu)
+				var ncu int
+				if wantus {
+					ncu = n
+				}
+				if wantua {
+					ncu = m
+				}
+				impl.Dorgbr(lapack.ApplyQ, m, ncu, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
+			}
+			if wantvas {
+				// Right singular vectors are desired in VT. Copy result to VT and
+				// generate right bidiagonalizing vectors in VT.
+				impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt)
+				impl.Dorgbr(lapack.ApplyP, n, n, n, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork)
+			}
+			if wantuo {
+				panic(noSVDO)
+			}
+			if wantvo {
+				panic(noSVDO)
+			}
+			iwork = ie + n
+			var nru, ncvt int
+			if wantuas || wantuo {
+				nru = m
+			}
+			if wantun {
+				nru = 0
+			}
+			if wantvas || wantvo {
+				ncvt = n
+			}
+			if wantvn {
+				ncvt = 0
+			}
+			if !wantuo && !wantvo {
+				// Perform bidiagonal QR iteration, if desired, computing left
+				// singular vectors in U and right singular vectors in VT.
+				ok = impl.Dbdsqr(blas.Upper, n, ncvt, nru, 0, s, work[ie:],
+					vt, ldvt, u, ldu, work, 1, work[iwork:])
+			} else {
+				// There will be two branches when the implementation is complete.
+				panic(noSVDO)
+			}
+		}
+	} else {
+		// A has more columns than rows. If A has sufficiently more columns than
+		// rows, first reduce using the LQ decomposition.
+		if n >= mnthr {
+			// n >> m.
+			if wantvn {
+				// Path 1t.
+				itau := 0
+				iwork := itau + m
+
+				// Compute A = L*Q.
+				impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+
+				// Zero out above L.
+				impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda)
+				ie := 0
+				itauq := ie + m
+				itaup := itauq + m
+				iwork = itaup + m
+
+				// Bidiagonalize L in A.
+				impl.Dgebrd(m, m, a, lda, s, work[ie:itauq],
+					work[itauq:itaup], work[itaup:iwork], work[iwork:], lwork-iwork)
+				if wantuo || wantuas {
+					impl.Dorgbr(lapack.ApplyQ, m, m, m, a, lda,
+						work[itauq:], work[iwork:], lwork-iwork)
+				}
+				iwork = ie + m
+				nru := 0
+				if wantuo || wantuas {
+					nru = m
+				}
+
+				// Perform bidiagonal QR iteration, computing left singular vectors
+				// of A in A if desired.
+				ok = impl.Dbdsqr(blas.Upper, m, 0, nru, 0, s, work[ie:],
+					work, 1, a, lda, work, 1, work[iwork:])
+
+				// If left singular vectors desired in U, copy them there.
+				if wantuas {
+					impl.Dlacpy(blas.All, m, m, a, lda, u, ldu)
+				}
+			} else if wantvo && wantun {
+				// Path 2t.
+				panic(noSVDO)
+			} else if wantvo && wantuas {
+				// Path 3t.
+				panic(noSVDO)
+			} else if wantvs {
+				if wantun {
+					// Path 4t.
+					if lwork >= m*m+max(4*m, bdspac) {
+						// Sufficient workspace for a fast algorithm.
+						ir := 0
+						var ldworkr int
+						if lwork >= wrkbl+lda*m {
+							ldworkr = lda
+						} else {
+							ldworkr = m
+						}
+						itau := ir + ldworkr*m
+						iwork := itau + m
+
+						// Compute A = L*Q.
+						impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+
+						// Copy L to work[ir:], zeroing out above it.
+						impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr)
+						impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr)
+
+						// Generate Q in A.
+						impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork)
+						ie := itau
+						itauq := ie + m
+						itaup := itauq + m
+						iwork = itaup + m
+
+						// Bidiagonalize L in work[ir:].
+						impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:],
+							work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
+
+						// Generate right vectors bidiagonalizing L in work[ir:].
+						impl.Dorgbr(lapack.ApplyP, m, m, m, work[ir:], ldworkr,
+							work[itaup:], work[iwork:], lwork-iwork)
+						iwork = ie + m
+
+						// Perform bidiagonal QR iteration, computing right singular
+						// vectors of L in work[ir:].
+						ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:],
+							work[ir:], ldworkr, work, 1, work, 1, work[iwork:])
+
+						// Multiply right singular vectors of L in work[ir:] by
+						// Q in A, storing result in VT.
+						bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1,
+							work[ir:], ldworkr, a, lda, 0, vt, ldvt)
+					} else {
+						// Insufficient workspace for a fast algorithm.
+						itau := 0
+						iwork := itau + m
+
+						// Compute A = L*Q.
+						impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+
+						// Copy result to VT.
+						impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt)
+
+						// Generate Q in VT.
+						impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork)
+						ie := itau
+						itauq := ie + m
+						itaup := itauq + m
+						iwork = itaup + m
+
+						// Zero out above L in A.
+						impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda)
+
+						// Bidiagonalize L in A.
+						impl.Dgebrd(m, m, a, lda, s, work[ie:],
+							work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
+
+						// Multiply right vectors bidiagonalizing L by Q in VT.
+						impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m,
+							a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork)
+						iwork = ie + m
+
+						// Perform bidiagonal QR iteration, computing right
+						// singular vectors of A in VT.
+						ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:],
+							vt, ldvt, work, 1, work, 1, work[iwork:])
+					}
+				} else if wantuo {
+					// Path 5t.
+					panic(noSVDO)
+				} else if wantuas {
+					// Path 6t.
+					if lwork >= m*m+max(4*m, bdspac) {
+						// Sufficient workspace for a fast algorithm.
+						iu := 0
+						var ldworku int
+						if lwork >= wrkbl+lda*m {
+							ldworku = lda
+						} else {
+							ldworku = m
+						}
+						itau := iu + ldworku*m
+						iwork := itau + m
+
+						// Compute A = L*Q.
+						impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
+
+						// Copy L to work[iu:], zeroing out above it.
+						impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku)
+						impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku)
+
+						// Generate Q in A.
+						impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork)
+						ie := itau
+						itauq := ie + m
+						itaup := itauq + m
+						iwork = itaup + m
+
+						// Bidiagonalize L in work[iu:], copying result to U.
+						impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:],
+							work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
+						impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu)
+
+						// Generate right bidiagonalizing vectors in work[iu:].
+						impl.Dorgbr(lapack.ApplyP, m, m, m, work[iu:], ldworku,
+							work[itaup:], work[iwork:], lwork-iwork)
+
+						// Generate left bidiagonalizing vectors in U.
+						impl.Dorgbr(lapack.ApplyQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
+						iwork = ie + m
+
+						// Perform bidiagonal QR iteration, computing left singular
+						// vectors of L in U and computing right singular vectors of
+						// L in work[iu:].
+ ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:], + work[iu:], ldworku, u, ldu, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[iu:] by + // Q in A, storing result in VT. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[iu:], ldworku, a, lda, 0, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L*Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to U, zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in U. + impl.Dgebrd(m, m, u, ldu, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in U by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.ApplyQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:], vt, ldvt, + u, ldu, work, 1, work[iwork:]) + } + } + } else if wantva { + if wantun { + // Path 7t. + if lwork >= m*m+max(max(n+m, 4*m), bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*m { + ldworkr = lda + } else { + ldworkr = m + } + itau := ir + ldworkr*m + iwork := itau + m + + // Compute A = L*Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Copy L to work[ir:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[ir:]. + impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in work[ir:]. + impl.Dorgbr(lapack.ApplyP, m, m, m, work[ir:], ldworkr, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right + // singular vectors of L in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:], + work[ir:], ldworkr, work, 1, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[ir:] by + // Q in VT, storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[ir:], ldworkr, vt, ldvt, 0, a, lda) + + // Copy right singular vectors of A from A to VT. + impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + // Compute A = L * Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. 
+ impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Zero out above L in A. + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) + + // Bidiagonalize L in A. + impl.Dgebrd(m, m, a, lda, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in A by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right singular + // vectors of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:], + vt, ldvt, work, 1, work, 1, work[iwork:]) + } + } else if wantuo { + panic(noSVDO) + } else if wantuas { + // Path 9t. + if lwork >= m*m+max(max(m+n, 4*m), bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + + var ldworku int + if lwork >= wrkbl+lda*m { + ldworku = lda + } else { + ldworku = m + } + itau := iu + ldworku*m + iwork := itau + m + + // Compute A = L * Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to work[iu:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku) + ie = itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[iu:], copying result to U. + impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu) + + // Generate right bidiagonalizing vectors in work[iu:]. + impl.Dorgbr(lapack.ApplyP, m, m, m, work[iu:], ldworku, + work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.ApplyQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of L in U and computing right singular vectors + // of L in work[iu:]. + ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:], + work[iu:], ldworku, u, ldu, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[iu:] by + // Q in VT, storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[iu:], ldworku, vt, ldvt, 0, a, lda) + + // Copy right singular vectors of A from A to VT. + impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L * Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to U, zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu) + + ie = itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in U. + impl.Dgebrd(m, m, u, ldu, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in U by Q in VT. 
+ impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.ApplyQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } + } else { + // Path 10t. + // N at least M, but not much larger. + ie = 0 + itauq := ie + m + itaup := itauq + m + iwork := itaup + m + + // Bidiagonalize A. + impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + if wantuas { + // If left singular vectors desired in U, copy result to U and + // generate left bidiagonalizing vectors in U. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dorgbr(lapack.ApplyQ, m, m, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + } + if wantvas { + // If right singular vectors desired in VT, copy result to VT + // and generate right bidiagonalizing vectors in VT. + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + var nrvt int + if wantva { + nrvt = n + } else { + nrvt = m + } + impl.Dorgbr(lapack.ApplyP, nrvt, n, m, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork) + } + if wantuo { + panic(noSVDO) + } + if wantvo { + panic(noSVDO) + } + iwork = ie + m + var nru, ncvt int + if wantuas || wantuo { + nru = m + } + if wantvas || wantvo { + ncvt = n + } + if !wantuo && !wantvo { + // Perform bidiagonal QR iteration, if desired, computing left + // singular vectors in U and computing right singular vectors in + // VT. + ok = impl.Dbdsqr(blas.Lower, m, ncvt, nru, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } else { + // There will be two branches when the implementation is complete. + panic(noSVDO) + } + } + } + if !ok { + if ie > 1 { + for i := 0; i < minmn-1; i++ { + work[i+1] = work[i+ie] + } + } + if ie < 1 { + for i := minmn - 2; i >= 0; i-- { + work[i+1] = work[i+ie] + } + } + } + // Undo scaling if necessary. + if iscl { + if anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bignum, anrm, minmn, 1, s, minmn) + } + if !ok && anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bignum, anrm, minmn-1, 1, work[minmn:], minmn) + } + if anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, minmn, 1, s, minmn) + } + if !ok && anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, minmn-1, 1, work[minmn:], minmn) + } + } + work[0] = float64(maxwrk) + return ok +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go new file mode 100644 index 00000000..1256bf34 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go @@ -0,0 +1,69 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetf2 computes the LU decomposition of the m×n matrix A. +// The LU decomposition is a factorization of a into +// A = P * L * U +// where P is a permutation matrix, L is a unit lower triangular matrix, and +// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored +// in place into a. 
+// +// ipiv is a permutation vector. It indicates that row i of the matrix was +// interchanged with row ipiv[i]. ipiv must have length at least min(m,n), and +// Dgetf2 will panic otherwise. ipiv is zero-indexed. +// +// Dgetf2 returns whether the matrix A is nonsingular. The LU decomposition will +// be computed regardless of the singularity of A, but division by zero +// will occur if false is returned and the result is used to solve a +// system of equations. +// +// Dgetf2 is an internal routine. It is exported for testing purposes. +func (Implementation) Dgetf2(m, n int, a []float64, lda int, ipiv []int) (ok bool) { + mn := min(m, n) + checkMatrix(m, n, a, lda) + if len(ipiv) < mn { + panic(badIpiv) + } + if m == 0 || n == 0 { + return true + } + bi := blas64.Implementation() + sfmin := dlamchS + ok = true + for j := 0; j < mn; j++ { + // Find a pivot and test for singularity. + jp := j + bi.Idamax(m-j, a[j*lda+j:], lda) + ipiv[j] = jp + if a[jp*lda+j] == 0 { + ok = false + } else { + // Swap the rows if necessary. + if jp != j { + bi.Dswap(n, a[j*lda:], 1, a[jp*lda:], 1) + } + if j < m-1 { + aj := a[j*lda+j] + if math.Abs(aj) >= sfmin { + bi.Dscal(m-j-1, 1/aj, a[(j+1)*lda+j:], lda) + } else { + // Scale each element of the column individually; i indexes + // the rows below the diagonal. + for i := 0; i < m-j-1; i++ { + a[(j+1+i)*lda+j] = a[(j+1+i)*lda+j] / a[j*lda+j] + } + } + } + } + if j < mn-1 { + bi.Dger(m-j-1, n-j-1, -1, a[(j+1)*lda+j:], lda, a[j*lda+j+1:], 1, a[(j+1)*lda+j+1:], lda) + } + } + return ok +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go new file mode 100644 index 00000000..7c0cc25b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go @@ -0,0 +1,70 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetrf computes the LU decomposition of the m×n matrix A. +// The LU decomposition is a factorization of A into +// A = P * L * U +// where P is a permutation matrix, L is a unit lower triangular matrix, and +// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored +// in place into a. +// +// ipiv is a permutation vector. It indicates that row i of the matrix was +// interchanged with row ipiv[i]. ipiv must have length at least min(m,n), and +// Dgetrf will panic otherwise. ipiv is zero-indexed. +// +// Dgetrf is the blocked version of the algorithm. +// +// Dgetrf returns whether the matrix A is nonsingular. The LU decomposition will +// be computed regardless of the singularity of A, but division by zero +// will occur if false is returned and the result is used to solve a +// system of equations. +func (impl Implementation) Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool) { + mn := min(m, n) + checkMatrix(m, n, a, lda) + if len(ipiv) < mn { + panic(badIpiv) + } + if m == 0 || n == 0 { + return true + } + bi := blas64.Implementation() + nb := impl.Ilaenv(1, "DGETRF", " ", m, n, -1, -1) + if nb <= 1 || nb >= min(m, n) { + // Use the unblocked algorithm. 
+ return impl.Dgetf2(m, n, a, lda, ipiv) + } + ok = true + for j := 0; j < mn; j += nb { + jb := min(mn-j, nb) + blockOk := impl.Dgetf2(m-j, jb, a[j*lda+j:], lda, ipiv[j:]) + if !blockOk { + ok = false + } + for i := j; i <= min(m-1, j+jb-1); i++ { + ipiv[i] = j + ipiv[i] + } + impl.Dlaswp(j, a, lda, j, j+jb-1, ipiv[:j+jb], 1) + if j+jb < n { + impl.Dlaswp(n-j-jb, a[j+jb:], lda, j, j+jb-1, ipiv[:j+jb], 1) + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit, + jb, n-j-jb, 1, + a[j*lda+j:], lda, + a[j*lda+j+jb:], lda) + if j+jb < m { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m-j-jb, n-j-jb, jb, -1, + a[(j+jb)*lda+j:], lda, + a[j*lda+j+jb:], lda, + 1, a[(j+jb)*lda+j+jb:], lda) + } + } + } + return ok +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go new file mode 100644 index 00000000..47f6306e --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go @@ -0,0 +1,92 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetri computes the inverse of the matrix A using the LU factorization computed +// by Dgetrf. On entry, a contains the PLU decomposition of A as computed by +// Dgetrf and on exit contains the inverse of the original matrix. +// +// Dgetri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= n and this function will panic otherwise. +// Dgetri is a blocked inversion, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgetri, +// the optimal work length will be stored into work[0]. +func (impl Implementation) Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool) { + checkMatrix(n, n, a, lda) + if len(ipiv) < n { + panic(badIpiv) + } + nb := impl.Ilaenv(1, "DGETRI", " ", n, -1, -1, -1) + if lwork == -1 { + work[0] = float64(n * nb) + return true + } + if lwork < n { + panic(badWork) + } + if len(work) < lwork { + panic(badWork) + } + if n == 0 { + return true + } + ok = impl.Dtrtri(blas.Upper, blas.NonUnit, n, a, lda) + if !ok { + return false + } + nbmin := 2 + ldwork := nb + if nb > 1 && nb < n { + iws := max(ldwork*n, 1) + if lwork < iws { + nb = lwork / ldwork + nbmin = max(2, impl.Ilaenv(2, "DGETRI", " ", n, -1, -1, -1)) + } + } + bi := blas64.Implementation() + // TODO(btracey): Replace this with a more row-major oriented algorithm. + if nb < nbmin || nb >= n { + // Unblocked code. 
+ for j := n - 1; j >= 0; j-- { + for i := j + 1; i < n; i++ { + work[i*ldwork] = a[i*lda+j] + a[i*lda+j] = 0 + } + if j < n-1 { + bi.Dgemv(blas.NoTrans, n, n-j-1, -1, a[(j+1):], lda, work[(j+1)*ldwork:], ldwork, 1, a[j:], lda) + } + } + } else { + nn := ((n - 1) / nb) * nb + for j := nn; j >= 0; j -= nb { + jb := min(nb, n-j) + for jj := j; jj < j+jb; jj++ { + for i := jj + 1; i < n; i++ { + work[i*ldwork+(jj-j)] = a[i*lda+jj] + a[i*lda+jj] = 0 + } + } + if j+jb < n { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, jb, n-j-jb, -1, a[(j+jb):], lda, work[(j+jb)*ldwork:], ldwork, 1, a[j:], lda) + } + bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, jb, 1, work[j*ldwork:], ldwork, a[j:], lda) + } + } + for j := n - 2; j >= 0; j-- { + jp := ipiv[j] + if jp != j { + bi.Dswap(n, a[j:], lda, a[jp:], lda) + } + } + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go new file mode 100644 index 00000000..da7e0c63 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go @@ -0,0 +1,55 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetrs solves a system of equations using an LU factorization. +// The system of equations solved is +// A * X = B if trans == blas.NoTrans +// A^T * X = B if trans == blas.Trans +// A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs. +// +// On entry b contains the elements of the matrix B. On exit, b contains the +// elements of X, the solution to the system of equations. +// +// a and ipiv contain the LU factorization of A and the permutation indices as +// computed by Dgetrf. ipiv is zero-indexed. +func (impl Implementation) Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int) { + checkMatrix(n, n, a, lda) + checkMatrix(n, nrhs, b, ldb) + if len(ipiv) < n { + panic(badIpiv) + } + if n == 0 || nrhs == 0 { + return + } + if trans != blas.Trans && trans != blas.NoTrans { + panic(badTrans) + } + bi := blas64.Implementation() + if trans == blas.NoTrans { + // Solve A * X = B. + impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, 1) + // Solve L * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit, + n, nrhs, 1, a, lda, b, ldb) + // Solve U * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, + n, nrhs, 1, a, lda, b, ldb) + return + } + // Solve A^T * X = B. + // Solve U^T * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, + n, nrhs, 1, a, lda, b, ldb) + // Solve L^T * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Lower, blas.Trans, blas.Unit, + n, nrhs, 1, a, lda, b, ldb) + impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, -1) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go new file mode 100644 index 00000000..be6e8da7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go @@ -0,0 +1,230 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
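Taken together, the Dgetf2, Dgetrf, Dgetri, and Dgetrs routines above follow the usual LAPACK division of labor: factor once with Dgetrf, then reuse the factors to solve or invert. A minimal factor-and-solve sketch, assuming only the vendored import paths and the signatures documented above (the 3×3 data is illustrative):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation
	// A is 3×3 in row-major order; b is the right-hand side.
	a := []float64{
		2, 1, 1,
		4, -6, 0,
		-2, 7, 2,
	}
	b := []float64{5, -2, 9}
	ipiv := make([]int, 3)
	// Factor A = P*L*U in place.
	if ok := impl.Dgetrf(3, 3, a, 3, ipiv); !ok {
		panic("A is singular")
	}
	// Overwrite b with the solution x of A*x = b; here x ≈ [1 1 2].
	impl.Dgetrs(blas.NoTrans, 3, 1, a, 3, ipiv, b, 1)
	fmt.Println(b)
}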
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dggsvd3 computes the generalized singular value decomposition (GSVD) +// of an m×n matrix A and p×n matrix B: +// U^T*A*Q = D1*[ 0 R ] +// +// V^T*B*Q = D2*[ 0 R ] +// where U, V and Q are orthogonal matrices. +// +// Dggsvd3 returns k and l, the dimensions of the sub-blocks. k+l +// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. +// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and +// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following +// structures, respectively: +// +// If m-k-l >= 0, +// +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] +// +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] +// +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l +// +// where +// +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. +// +// R is stored in +// A[0:k+l, n-k-l:n] +// on exit. +// +// If m-k-l < 0, +// +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] +// +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] +// +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] +// +// where +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] +// [ 0 R22 R23 ] +// and R33 is stored in +// B[m-k:l, n+m-k-l:n] on exit. +// +// Dggsvd3 computes C, S, R, and optionally the orthogonal transformation +// matrices U, V and Q. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// alpha and beta must have length n or Dggsvd3 will panic. On exit, alpha and +// beta contain the generalized singular value pairs of A and B +// alpha[0:k] = 1, +// beta[0:k] = 0, +// if m-k-l >= 0, +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// if m-k-l < 0, +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// if k+l < n, +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. +// +// On exit, iwork contains the permutation required to sort alpha descending. +// +// iwork must have length n, work must have length at least max(1, lwork), and +// lwork must be -1 or greater than n, otherwise Dggsvd3 will panic. If +// lwork is -1, work[0] holds the optimal lwork on return, but Dggsvd3 does +// not perform the GSVD. 
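The lwork == -1 query convention described above gives the standard two-call pattern: query, allocate, then decompose. A hedged sketch against the signature declared just below (all dimensions and data are made up for illustration):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation
	m, n, p := 3, 2, 2
	a := []float64{1, 2, 3, 4, 5, 6} // m×n, row-major
	b := []float64{7, 8, 9, 10}      // p×n, row-major
	alpha := make([]float64, n)
	beta := make([]float64, n)
	u := make([]float64, m*m)
	v := make([]float64, p*p)
	q := make([]float64, n*n)
	iwork := make([]int, n)

	// First call: workspace query. Only work[0] is written.
	work := make([]float64, 1)
	impl.Dggsvd3(lapack.GSVDU, lapack.GSVDV, lapack.GSVDQ, m, n, p,
		a, n, b, n, alpha, beta, u, m, v, p, q, n, work, -1, iwork)

	// Second call: the decomposition itself, with the optimal workspace.
	work = make([]float64, int(work[0]))
	k, l, ok := impl.Dggsvd3(lapack.GSVDU, lapack.GSVDV, lapack.GSVDQ, m, n, p,
		a, n, b, n, alpha, beta, u, m, v, p, q, n, work, len(work), iwork)
	fmt.Println("k:", k, "l:", l, "converged:", ok)
}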
+func (impl Implementation) Dggsvd3(jobU, jobV, jobQ lapack.GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool) { + checkMatrix(m, n, a, lda) + checkMatrix(p, n, b, ldb) + + wantu := jobU == lapack.GSVDU + if wantu { + checkMatrix(m, m, u, ldu) + } else if jobU != lapack.GSVDNone { + panic(badGSVDJob + "U") + } + wantv := jobV == lapack.GSVDV + if wantv { + checkMatrix(p, p, v, ldv) + } else if jobV != lapack.GSVDNone { + panic(badGSVDJob + "V") + } + wantq := jobQ == lapack.GSVDQ + if wantq { + checkMatrix(n, n, q, ldq) + } else if jobQ != lapack.GSVDNone { + panic(badGSVDJob + "Q") + } + + if len(alpha) != n { + panic(badAlpha) + } + if len(beta) != n { + panic(badBeta) + } + + if lwork != -1 && lwork <= n { + panic(badWork) + } + if len(work) < max(1, lwork) { + panic(shortWork) + } + if len(iwork) < n { + panic(badWork) + } + + // Determine optimal work length. + impl.Dggsvp3(jobU, jobV, jobQ, + m, p, n, + a, lda, + b, ldb, + 0, 0, + u, ldu, + v, ldv, + q, ldq, + iwork, + work, work, -1) + lwkopt := n + int(work[0]) + lwkopt = max(lwkopt, 2*n) + lwkopt = max(lwkopt, 1) + work[0] = float64(lwkopt) + if lwork == -1 { + return 0, 0, true + } + + // Compute the Frobenius norm of matrices A and B. + anorm := impl.Dlange(lapack.NormFrob, m, n, a, lda, nil) + bnorm := impl.Dlange(lapack.NormFrob, p, n, b, ldb, nil) + + // Get machine precision and set up threshold for determining + // the effective numerical rank of the matrices A and B. + tola := float64(max(m, n)) * math.Max(anorm, dlamchS) * dlamchP + tolb := float64(max(p, n)) * math.Max(bnorm, dlamchS) * dlamchP + + // Preprocessing. + k, l = impl.Dggsvp3(jobU, jobV, jobQ, + m, p, n, + a, lda, + b, ldb, + tola, tolb, + u, ldu, + v, ldv, + q, ldq, + iwork, + work[:n], work[n:], lwork-n) + + // Compute the GSVD of two upper "triangular" matrices. + _, ok = impl.Dtgsja(jobU, jobV, jobQ, + m, p, n, + k, l, + a, lda, + b, ldb, + tola, tolb, + alpha, beta, + u, ldu, + v, ldv, + q, ldq, + work) + + // Sort the singular values and store the pivot indices in iwork + // Copy alpha to work, then sort alpha in work. + bi := blas64.Implementation() + bi.Dcopy(n, alpha, 1, work[:n], 1) + ibnd := min(l, m-k) + for i := 0; i < ibnd; i++ { + // Scan for largest alpha_{k+i}. + isub := i + smax := work[k+i] + for j := i + 1; j < ibnd; j++ { + if v := work[k+j]; v > smax { + isub = j + smax = v + } + } + if isub != i { + work[k+isub] = work[k+i] + work[k+i] = smax + iwork[k+i] = k + isub + } else { + iwork[k+i] = k + i + } + } + + work[0] = float64(lwkopt) + + return k, l, ok +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go new file mode 100644 index 00000000..19187968 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go @@ -0,0 +1,273 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dggsvp3 computes orthogonal matrices U, V and Q such that +// +// n-k-l k l +// U^T*A*Q = k [ 0 A12 A13 ] if m-k-l >= 0; +// l [ 0 0 A23 ] +// m-k-l [ 0 0 0 ] +// +// n-k-l k l +// U^T*A*Q = k [ 0 A12 A13 ] if m-k-l < 0; +// m-k [ 0 0 A23 ] +// +// n-k-l k l +// V^T*B*Q = l [ 0 0 B13 ] +// p-l [ 0 0 0 ] +// +// where the k×k matrix A12 and l×l matrix B13 are non-singular +// upper triangular. A23 is l×l upper triangular if m-k-l >= 0, +// otherwise A23 is (m-k)×l upper trapezoidal. +// +// Dggsvp3 returns k and l, the dimensions of the sub-blocks. k+l +// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz +// iteration procedure. Generally, they are the same as used in the preprocessing +// step, for example, +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps. +// Where eps is the machine epsilon. +// +// iwork must have length n, work must have length at least max(1, lwork), and +// lwork must be -1 or greater than zero, otherwise Dggsvp3 will panic. +// +// Dggsvp3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dggsvp3(jobU, jobV, jobQ lapack.GSVDJob, m, p, n int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, iwork []int, tau, work []float64, lwork int) (k, l int) { + const forward = true + + checkMatrix(m, n, a, lda) + checkMatrix(p, n, b, ldb) + + wantu := jobU == lapack.GSVDU + if !wantu && jobU != lapack.GSVDNone { + panic(badGSVDJob + "U") + } + if jobU != lapack.GSVDNone { + checkMatrix(m, m, u, ldu) + } + + wantv := jobV == lapack.GSVDV + if !wantv && jobV != lapack.GSVDNone { + panic(badGSVDJob + "V") + } + if jobV != lapack.GSVDNone { + checkMatrix(p, p, v, ldv) + } + + wantq := jobQ == lapack.GSVDQ + if !wantq && jobQ != lapack.GSVDNone { + panic(badGSVDJob + "Q") + } + if jobQ != lapack.GSVDNone { + checkMatrix(n, n, q, ldq) + } + + if len(iwork) != n { + panic(badWork) + } + if lwork != -1 && lwork < 1 { + panic(badWork) + } + if len(work) < max(1, lwork) { + panic(badWork) + } + + var lwkopt int + impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, -1) + lwkopt = int(work[0]) + if wantv { + lwkopt = max(lwkopt, p) + } + lwkopt = max(lwkopt, min(n, p)) + lwkopt = max(lwkopt, m) + if wantq { + lwkopt = max(lwkopt, n) + } + impl.Dgeqp3(m, n, a, lda, iwork, tau, work, -1) + lwkopt = max(lwkopt, int(work[0])) + lwkopt = max(1, lwkopt) + if lwork == -1 { + work[0] = float64(lwkopt) + return 0, 0 + } + + // tau check must come after lwkopt query since + // the Dggsvd3 call for lwkopt query may have + // lwork == -1, and tau is provided by work. + if len(tau) < n { + panic(badTau) + } + + // QR with column pivoting of B: B*P = V*[ S11 S12 ]. 
+ // [ 0 0 ] + for i := range iwork[:n] { + iwork[i] = 0 + } + impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, lwork) + + // Update A := A*P. + impl.Dlapmt(forward, m, n, a, lda, iwork) + + // Determine the effective rank of matrix B. + for i := 0; i < min(p, n); i++ { + if math.Abs(b[i*ldb+i]) > tolb { + l++ + } + } + + if wantv { + // Copy the details of V, and form V. + impl.Dlaset(blas.All, p, p, 0, 0, v, ldv) + if p > 1 { + impl.Dlacpy(blas.Lower, p-1, min(p, n), b[ldb:], ldb, v[ldv:], ldv) + } + impl.Dorg2r(p, p, min(p, n), v, ldv, tau, work) + } + + // Clean up B. + for i := 1; i < l; i++ { + r := b[i*ldb : i*ldb+i] + for j := range r { + r[j] = 0 + } + } + if p > l { + impl.Dlaset(blas.All, p-l, n, 0, 0, b[l*ldb:], ldb) + } + + if wantq { + // Set Q = I and update Q := Q*P. + impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) + impl.Dlapmt(forward, n, n, q, ldq, iwork) + } + + if p >= l && n != l { + // RQ factorization of [ S11 S12 ]: [ S11 S12 ] = [ 0 S12 ]*Z. + impl.Dgerq2(l, n, b, ldb, tau, work) + + // Update A := A*Z^T. + impl.Dormr2(blas.Right, blas.Trans, m, n, l, b, ldb, tau, a, lda, work) + + if wantq { + // Update Q := Q*Z^T. + impl.Dormr2(blas.Right, blas.Trans, n, n, l, b, ldb, tau, q, ldq, work) + } + + // Clean up B. + impl.Dlaset(blas.All, l, n-l, 0, 0, b, ldb) + for i := 1; i < l; i++ { + r := b[i*ldb+n-l : i*ldb+i+n-l] + for j := range r { + r[j] = 0 + } + } + } + + // Let N-L L + // A = [ A11 A12 ] M, + // + // then the following does the complete QR decomposition of A11: + // + // A11 = U*[ 0 T12 ]*P1^T. + // [ 0 0 ] + for i := range iwork[:n-l] { + iwork[i] = 0 + } + impl.Dgeqp3(m, n-l, a, lda, iwork[:n-l], tau, work, lwork) + + // Determine the effective rank of A11. + for i := 0; i < min(m, n-l); i++ { + if math.Abs(a[i*lda+i]) > tola { + k++ + } + } + + // Update A12 := U^T*A12, where A12 = A[0:m, n-l:n]. + impl.Dorm2r(blas.Left, blas.Trans, m, l, min(m, n-l), a, lda, tau, a[n-l:], lda, work) + + if wantu { + // Copy the details of U, and form U. + impl.Dlaset(blas.All, m, m, 0, 0, u, ldu) + if m > 1 { + impl.Dlacpy(blas.Lower, m-1, min(m, n-l), a[lda:], lda, u[ldu:], ldu) + } + impl.Dorg2r(m, m, min(m, n-l), u, ldu, tau, work) + } + + if wantq { + // Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*P1. + impl.Dlapmt(forward, n, n-l, q, ldq, iwork[:n-l]) + } + + // Clean up A: set the strictly lower triangular part of + // A[0:k, 0:k] = 0, and A[k:m, 0:n-l] = 0. + for i := 1; i < k; i++ { + r := a[i*lda : i*lda+i] + for j := range r { + r[j] = 0 + } + } + if m > k { + impl.Dlaset(blas.All, m-k, n-l, 0, 0, a[k*lda:], lda) + } + + if n-l > k { + // RQ factorization of [ T11 T12 ] = [ 0 T12 ]*Z1. + impl.Dgerq2(k, n-l, a, lda, tau, work) + + if wantq { + // Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*Z1^T. + impl.Dormr2(blas.Right, blas.Trans, n, n-l, k, a, lda, tau, q, ldq, work) + } + + // Clean up A. + impl.Dlaset(blas.All, k, n-l-k, 0, 0, a, lda) + for i := 1; i < k; i++ { + r := a[i*lda+n-k-l : i*lda+i+n-k-l] + for j := range r { + r[j] = 0 + } + } + } + + if m > k { + // QR factorization of A[k:m, n-l:n]. + impl.Dgeqr2(m-k, l, a[k*lda+n-l:], lda, tau, work) + if wantu { + // Update U[:, k:m] := U[:, k:m]*U1. + impl.Dorm2r(blas.Right, blas.NoTrans, m, m-k, min(m-k, l), a[k*lda+n-l:], lda, tau, u[k:], ldu, work) + } + + // Clean up A. 
+ for i := k + 1; i < m; i++ { + r := a[i*lda+n-l : i*lda+min(n-l+i-k, n)] + for j := range r { + r[j] = 0 + } + } + } + + work[0] = float64(lwkopt) + return k, l +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go new file mode 100644 index 00000000..c9deee90 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go @@ -0,0 +1,257 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dhseqr computes the eigenvalues of an n×n Hessenberg matrix H and, +// optionally, the matrices T and Z from the Schur decomposition +// H = Z T Z^T, +// where T is an n×n upper quasi-triangular matrix (the Schur form), and Z is +// the n×n orthogonal matrix of Schur vectors. +// +// Optionally Z may be postmultiplied into an input orthogonal matrix Q so that +// this routine can give the Schur factorization of a matrix A which has been +// reduced to the Hessenberg form H by the orthogonal matrix Q: +// A = Q H Q^T = (QZ) T (QZ)^T. +// +// If job == lapack.EigenvaluesOnly, only the eigenvalues will be computed. +// If job == lapack.EigenvaluesAndSchur, the eigenvalues and the Schur form T will +// be computed. +// For other values of job Dhseqr will panic. +// +// If compz == lapack.None, no Schur vectors will be computed and Z will not be +// referenced. +// If compz == lapack.HessEV, on return Z will contain the matrix of Schur +// vectors of H. +// If compz == lapack.OriginalEV, on entry z is assumed to contain the orthogonal +// matrix Q that is the identity except for the submatrix +// Q[ilo:ihi+1,ilo:ihi+1]. On return z will be updated to the product Q*Z. +// +// ilo and ihi determine the block of H on which Dhseqr operates. It is assumed +// that H is already upper triangular in rows and columns [0:ilo] and [ihi+1:n], +// although it will be only checked that the block is isolated, that is, +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// and Dhseqr will panic otherwise. ilo and ihi are typically set by a previous +// call to Dgebal, otherwise they should be set to 0 and n-1, respectively. It +// must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0. +// +// wr and wi must have length n. +// +// work must have length at least lwork and lwork must be at least max(1,n) +// otherwise Dhseqr will panic. The minimum lwork delivers very good and +// sometimes optimal performance, although lwork as large as 11*n may be +// required. On return, work[0] will contain the optimal value of lwork. +// +// If lwork is -1, instead of performing Dhseqr, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// unconverged indicates whether Dhseqr computed all the eigenvalues. +// +// If unconverged == 0, all the eigenvalues have been computed and their real +// and imaginary parts will be stored on return in wr and wi, respectively. If +// two eigenvalues are computed as a complex conjugate pair, they are stored in +// consecutive elements of wr and wi, say the i-th and (i+1)th, with wi[i] > 0 +// and wi[i+1] < 0. 
+// +// If unconverged == 0 and job == lapack.EigenvaluesAndSchur, on return H will +// contain the upper quasi-triangular matrix T from the Schur decomposition (the +// Schur form). 2×2 diagonal blocks (corresponding to complex conjugate pairs of +// eigenvalues) will be returned in standard form, with +// H[i,i] == H[i+1,i+1], +// and +// H[i+1,i]*H[i,i+1] < 0. +// The eigenvalues will be stored in wr and wi in the same order as on the +// diagonal of the Schur form returned in H, with +// wr[i] = H[i,i], +// and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, +// wi[i] = sqrt(-H[i+1,i]*H[i,i+1]), +// wi[i+1] = -wi[i]. +// +// If unconverged == 0 and job == lapack.EigenvaluesOnly, the contents of h +// on return is unspecified. +// +// If unconverged > 0, some eigenvalues have not converged, and the blocks +// [0:ilo] and [unconverged:n] of wr and wi will contain those eigenvalues which +// have been successfully computed. Failures are rare. +// +// If unconverged > 0 and job == lapack.EigenvaluesOnly, on return the +// remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg +// matrix H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged > 0 and job == lapack.EigenvaluesAndSchur, then on +// return +// (initial H) U = U (final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged > 0 and compz == lapack.OriginalEV, then on return +// (final Z) = (initial Z) U, +// where U is the orthogonal matrix in (*) regardless of the value of job. +// +// If unconverged > 0 and compz == lapack.HessEV, then on return +// (final Z) = U, +// where U is the orthogonal matrix in (*) regardless of the value of job. +// +// References: +// [1] R. Byers. LAPACK 3.1 xHSEQR: Tuning and Implementation Notes on the +// Small Bulge Multi-Shift QR Algorithm with Aggressive Early Deflation. +// LAPACK Working Note 187 (2007) +// URL: http://www.netlib.org/lapack/lawnspdf/lawn187.pdf +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [3] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// Dhseqr is an internal routine. It is exported for testing purposes. 
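The calling convention is easiest to see on a tiny concrete case. A sketch, assuming the lapack job constants named in the comment above and an already-Hessenberg input (n = 3 is below the Dlahqr crossover, so the small-matrix path is taken):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation
	n := 3
	// An upper Hessenberg matrix in row-major order.
	h := []float64{
		4, 1, 2,
		3, 4, 1,
		0, 2, 5,
	}
	wr := make([]float64, n)
	wi := make([]float64, n)
	z := make([]float64, n*n)
	work := make([]float64, n) // lwork >= max(1, n)
	unconverged := impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.HessEV,
		n, 0, n-1, h, n, wr, wi, z, n, work, len(work))
	if unconverged == 0 {
		// wi[i] != 0 marks a complex conjugate pair.
		fmt.Println("eigenvalues:", wr, wi)
	}
}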
+func (impl Implementation) Dhseqr(job lapack.EVJob, compz lapack.EVComp, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, z []float64, ldz int, work []float64, lwork int) (unconverged int) { + var wantt bool + switch job { + default: + panic(badEVJob) + case lapack.EigenvaluesOnly: + case lapack.EigenvaluesAndSchur: + wantt = true + } + var wantz bool + switch compz { + default: + panic(badEVComp) + case lapack.None: + case lapack.HessEV, lapack.OriginalEV: + wantz = true + } + switch { + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case len(work) < lwork: + panic(shortWork) + case lwork < max(1, n) && lwork != -1: + panic(badWork) + } + if lwork != -1 { + checkMatrix(n, n, h, ldh) + if wantz { + checkMatrix(n, n, z, ldz) + } + switch { + case len(wr) < n: + panic("lapack: wr has insufficient length") + case len(wi) < n: + panic("lapack: wi has insufficient length") + } + } + + const ( + // Matrices of order ntiny or smaller must be processed by + // Dlahqr because of insufficient subdiagonal scratch space. + // This is a hard limit. + ntiny = 11 + + // nl is the size of a local workspace to help small matrices + // through a rare Dlahqr failure. nl > ntiny is required and + // nl <= nmin = Ilaenv(ispec=12,...) is recommended (the default + // value of nmin is 75). Using nl = 49 allows up to six + // simultaneous shifts and a 16×16 deflation window. + nl = 49 + ) + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return 0 + } + + // Quick return in case of a workspace query. + if lwork == -1 { + impl.Dlaqr04(wantt, wantz, n, ilo, ihi, nil, 0, nil, nil, ilo, ihi, nil, 0, work, -1, 1) + work[0] = math.Max(float64(n), work[0]) + return 0 + } + + // Copy eigenvalues isolated by Dgebal. + for i := 0; i < ilo; i++ { + wr[i] = h[i*ldh+i] + wi[i] = 0 + } + for i := ihi + 1; i < n; i++ { + wr[i] = h[i*ldh+i] + wi[i] = 0 + } + + // Initialize Z to identity matrix if requested. + if compz == lapack.HessEV { + impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) + } + + // Quick return if possible. + if ilo == ihi { + wr[ilo] = h[ilo*ldh+ilo] + wi[ilo] = 0 + return 0 + } + + // Dlahqr/Dlaqr04 crossover point. + nmin := impl.Ilaenv(12, "DHSEQR", string(job)+string(compz), n, ilo, ihi, lwork) + nmin = max(ntiny, nmin) + + if n > nmin { + // Dlaqr0 for big matrices. + unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1], + ilo, ihi, z, ldz, work, lwork, 1) + } else { + // Dlahqr for small matrices. + unconverged = impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1], + ilo, ihi, z, ldz) + if unconverged > 0 { + // A rare Dlahqr failure! Dlaqr04 sometimes succeeds + // when Dlahqr fails. + kbot := unconverged + if n >= nl { + // Larger matrices have enough subdiagonal + // scratch space to call Dlaqr04 directly. + unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, kbot, h, ldh, + wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, work, lwork, 1) + } else { + // Tiny matrices don't have enough subdiagonal + // scratch space to benefit from Dlaqr04. Hence, + // tiny matrices must be copied into a larger + // array before calling Dlaqr04. 
+ var hl [nl * nl]float64 + impl.Dlacpy(blas.All, n, n, h, ldh, hl[:], nl) + impl.Dlaset(blas.All, nl, nl-n, 0, 0, hl[n:], nl) + var workl [nl]float64 + unconverged = impl.Dlaqr04(wantt, wantz, nl, ilo, kbot, hl[:], nl, + wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, workl[:], nl, 1) + work[0] = workl[0] + if wantt || unconverged > 0 { + impl.Dlacpy(blas.All, n, n, hl[:], nl, h, ldh) + } + } + } + } + // Zero out under the first subdiagonal, if necessary. + if (wantt || unconverged > 0) && n > 2 { + impl.Dlaset(blas.Lower, n-2, n-2, 0, 0, h[2*ldh:], ldh) + } + + work[0] = math.Max(float64(n), work[0]) + return unconverged +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go new file mode 100644 index 00000000..0527ebef --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go @@ -0,0 +1,150 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlabrd reduces the first nb rows and columns of a real general m×n matrix +// A to upper or lower bidiagonal form by an orthogonal transformation +// Q^T * A * P +// If m >= n, A is reduced to upper bidiagonal form and upon exit the elements +// on and below the diagonal in the first nb columns represent the elementary +// reflectors, and the elements above the diagonal in the first nb rows represent +// the matrix P. If m < n, A is reduced to lower bidiagonal form and the +// elements representing P are instead stored above the diagonal. +// +// The reduction to bidiagonal form is stored in d and e, where d are the diagonal +// elements, and e are the off-diagonal elements. +// +// The matrices Q and P are products of elementary reflectors +// Q = H_0 * H_1 * ... * H_{nb-1} +// P = G_0 * G_1 * ... * G_{nb-1} +// where +// H_i = I - tauQ[i] * v_i * v_i^T +// G_i = I - tauP[i] * u_i * u_i^T +// +// As an example, on exit the entries of A when m = 6, n = 5, and nb = 2 +// [ 1 1 u1 u1 u1] + [v1 1 1 u2 u2] + [v1 v2 a a a] + [v1 v2 a a a] + [v1 v2 a a a] + [v1 v2 a a a] +// and when m = 5, n = 6, and nb = 2 +// [ 1 u1 u1 u1 u1 u1] + [ 1 1 u2 u2 u2 u2] + [v1 1 a a a a] + [v1 v2 a a a a] + [v1 v2 a a a a] +// +// Dlabrd also returns the matrices X and Y which are needed to apply the +// transformation to the unreduced part of the matrix +// A := A - V*Y^T - X*U^T. +// +// X is an m×nb matrix, Y is an n×nb matrix. d, e, tauP, and tauQ must all have +// length at least nb. Dlabrd will panic if these size constraints are violated. +// +// Dlabrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlabrd(m, n, nb int, a []float64, lda int, d, e, tauQ, tauP, x []float64, ldx int, y []float64, ldy int) { + checkMatrix(m, n, a, lda) + checkMatrix(m, nb, x, ldx) + checkMatrix(n, nb, y, ldy) + if len(d) < nb { + panic(badD) + } + if len(e) < nb { + panic(badE) + } + if len(tauQ) < nb { + panic(badTauQ) + } + if len(tauP) < nb { + panic(badTauP) + } + if m <= 0 || n <= 0 { + return + } + bi := blas64.Implementation() + if m >= n { + // Reduce to upper bidiagonal form. 
+ for i := 0; i < nb; i++ { + bi.Dgemv(blas.NoTrans, m-i, i, -1, a[i*lda:], lda, y[i*ldy:], 1, 1, a[i*lda+i:], lda) + bi.Dgemv(blas.NoTrans, m-i, i, -1, x[i*ldx:], ldx, a[i:], lda, 1, a[i*lda+i:], lda) + + a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) + d[i] = a[i*lda+i] + if i < n-1 { + // Compute Y[i+1:n, i]. + a[i*lda+i] = 1 + bi.Dgemv(blas.Trans, m-i, n-i-1, 1, a[i*lda+i+1:], lda, a[i*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i, i, 1, a[i*lda:], lda, a[i*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i, i, 1, x[i*ldx:], ldx, a[i*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) + + // Update A[i, i+1:n]. + bi.Dgemv(blas.NoTrans, n-i-1, i+1, -1, y[(i+1)*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i+1:], 1) + bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, x[i*ldx:], 1, 1, a[i*lda+i+1:], 1) + + // Generate reflection P[i] to annihilate A[i, i+2:n]. + a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) + e[i] = a[i*lda+i+1] + a[i*lda+i+1] = 1 + + // Compute X[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[i*lda+i+1:], 1, 0, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.Trans, n-i-1, i+1, 1, y[(i+1)*ldy:], ldy, a[i*lda+i+1:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.NoTrans, i, n-i-1, 1, a[i+1:], lda, a[i*lda+i+1:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) + } + } + return + } + // Reduce to lower bidiagonal form. + for i := 0; i < nb; i++ { + // Update A[i,i:n] + bi.Dgemv(blas.NoTrans, n-i, i, -1, y[i*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i:], 1) + bi.Dgemv(blas.Trans, i, n-i, -1, a[i:], lda, x[i*ldx:], 1, 1, a[i*lda+i:], 1) + + // Generate reflection P[i] to annihilate A[i, i+1:n] + a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + d[i] = a[i*lda+i] + if i < m-1 { + a[i*lda+i] = 1 + // Compute X[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, n-i, 1, a[(i+1)*lda+i:], lda, a[i*lda+i:], 1, 0, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.Trans, n-i, i, 1, y[i*ldy:], ldy, a[i*lda+i:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.NoTrans, i, n-i, 1, a[i:], lda, a[i*lda+i:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) + + // Update A[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, y[i*ldy:], 1, 1, a[(i+1)*lda+i:], lda) + bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, x[(i+1)*ldx:], ldx, a[i:], lda, 1, a[(i+1)*lda+i:], lda) + + // Generate reflection Q[i] to annihilate A[i+2:m, i]. + a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + a[(i+1)*lda+i] = 1 + + // Compute Y[i+1:n, i]. 
+ bi.Dgemv(blas.Trans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i-1, i, 1, a[(i+1)*lda:], lda, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i-1, i+1, 1, x[(i+1)*ldx:], ldx, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.Trans, i+1, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go new file mode 100644 index 00000000..751d5caa --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlacn2 estimates the 1-norm of an n×n matrix A using sequential updates with +// matrix-vector products provided externally. +// +// Dlacn2 is called sequentially and it returns the value of est and kase to be +// used on the next call. +// On the initial call, kase must be 0. +// In between calls, x must be overwritten by +// A * X if kase was returned as 1, +// A^T * X if kase was returned as 2, +// and all other parameters must not be changed. +// On the final return, kase is returned as 0, v contains A*W where W is a +// vector, and est = norm(V)/norm(W) is a lower bound for 1-norm of A. +// +// v, x, and isgn must all have length n and n must be at least 1, otherwise +// Dlacn2 will panic. isave is used for temporary storage. +// +// Dlacn2 is an internal routine. It is exported for testing purposes. 
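The reverse-communication contract described above is clearest as a loop: call Dlacn2, and while kase is nonzero overwrite x with the product it requests. A sketch that estimates the 1-norm of an explicit matrix (the explicit Dgemv is only for illustration; the point of the interface is that A need never be formed):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/blas64"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	var impl gonum.Implementation
	bi := blas64.Implementation()
	n := 3
	a := []float64{
		1, -2, 0,
		4, 3, 1,
		0, 5, -1,
	}
	v := make([]float64, n)
	x := make([]float64, n)
	y := make([]float64, n)
	isgn := make([]int, n)
	var isave [3]int
	var est float64
	kase := 0
	for {
		est, kase = impl.Dlacn2(n, v, x, isgn, est, kase, &isave)
		if kase == 0 {
			break
		}
		t := blas.NoTrans // kase == 1 requests A*x.
		if kase == 2 {
			t = blas.Trans // kase == 2 requests A^T*x.
		}
		bi.Dgemv(t, n, n, 1, a, n, x, 1, 0, y, 1)
		copy(x, y)
	}
	// The true 1-norm of this A is 10; est is a lower bound and is
	// usually exact for matrices this small.
	fmt.Println("estimated 1-norm:", est)
}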
+func (impl Implementation) Dlacn2(n int, v, x []float64, isgn []int, est float64, kase int, isave *[3]int) (float64, int) { + if n < 1 { + panic("lapack: non-positive n") + } + checkVector(n, x, 1) + checkVector(n, v, 1) + if len(isgn) < n { + panic("lapack: insufficient isgn length") + } + if isave[0] < 0 || isave[0] > 5 { + panic("lapack: bad isave value") + } + if isave[0] == 0 && kase != 0 { + panic("lapack: bad isave value") + } + itmax := 5 + bi := blas64.Implementation() + if kase == 0 { + for i := 0; i < n; i++ { + x[i] = 1 / float64(n) + } + kase = 1 + isave[0] = 1 + return est, kase + } + switch isave[0] { + default: + panic("unreachable") + case 1: + if n == 1 { + v[0] = x[0] + est = math.Abs(v[0]) + kase = 0 + return est, kase + } + est = bi.Dasum(n, x, 1) + for i := 0; i < n; i++ { + x[i] = math.Copysign(1, x[i]) + isgn[i] = int(x[i]) + } + kase = 2 + isave[0] = 2 + return est, kase + case 2: + isave[1] = bi.Idamax(n, x, 1) + isave[2] = 2 + for i := 0; i < n; i++ { + x[i] = 0 + } + x[isave[1]] = 1 + kase = 1 + isave[0] = 3 + return est, kase + case 3: + bi.Dcopy(n, x, 1, v, 1) + estold := est + est = bi.Dasum(n, v, 1) + sameSigns := true + for i := 0; i < n; i++ { + if int(math.Copysign(1, x[i])) != isgn[i] { + sameSigns = false + break + } + } + if !sameSigns && est > estold { + for i := 0; i < n; i++ { + x[i] = math.Copysign(1, x[i]) + isgn[i] = int(x[i]) + } + kase = 2 + isave[0] = 4 + return est, kase + } + case 4: + jlast := isave[1] + isave[1] = bi.Idamax(n, x, 1) + if x[jlast] != math.Abs(x[isave[1]]) && isave[2] < itmax { + isave[2] += 1 + for i := 0; i < n; i++ { + x[i] = 0 + } + x[isave[1]] = 1 + kase = 1 + isave[0] = 3 + return est, kase + } + case 5: + tmp := 2 * (bi.Dasum(n, x, 1)) / float64(3*n) + if tmp > est { + bi.Dcopy(n, x, 1, v, 1) + est = tmp + } + kase = 0 + return est, kase + } + // Iteration complete. Final stage. + altsgn := 1.0 + for i := 0; i < n; i++ { + x[i] = altsgn * (1 + float64(i)/float64(n-1)) + altsgn *= -1 + } + kase = 1 + isave[0] = 5 + return est, kase +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go new file mode 100644 index 00000000..2fe952d5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go @@ -0,0 +1,40 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlacpy copies the elements of A specified by uplo into B. Uplo can specify +// a triangular portion with blas.Upper or blas.Lower, or can specify all of the +// elements with blas.All. +// +// Dlacpy is an internal routine. It is exported for testing purposes. 
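A small sketch of the uplo semantics just described, assuming an Implementation value impl and the blas import as in the earlier sketches:

a := []float64{
	1, 2, 3,
	4, 5, 6,
	7, 8, 9,
}
b := make([]float64, 9)
impl.Dlacpy(blas.Upper, 3, 3, a, 3, b, 3)
// b is now {1, 2, 3, 0, 5, 6, 0, 0, 9}: only the upper triangle was copied,
// and the untouched entries of b keep their previous values (here zero).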
+func (impl Implementation) Dlacpy(uplo blas.Uplo, m, n int, a []float64, lda int, b []float64, ldb int) { + checkMatrix(m, n, a, lda) + checkMatrix(m, n, b, ldb) + switch uplo { + default: + panic(badUplo) + case blas.Upper: + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + + case blas.Lower: + for i := 0; i < m; i++ { + for j := 0; j < min(i+1, n); j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + case blas.All: + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go new file mode 100644 index 00000000..c071fec7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go @@ -0,0 +1,49 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlae2 computes the eigenvalues of a 2×2 symmetric matrix +// [a b] +// [b c] +// and returns the eigenvalue with the larger absolute value as rt1 and the +// smaller as rt2. +// +// Dlae2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlae2(a, b, c float64) (rt1, rt2 float64) { + sm := a + c + df := a - c + adf := math.Abs(df) + tb := b + b + ab := math.Abs(tb) + acmx := c + acmn := a + if math.Abs(a) > math.Abs(c) { + acmx = a + acmn = c + } + var rt float64 + if adf > ab { + rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) + } else if adf < ab { + rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) + } else { + rt = ab * math.Sqrt2 + } + if sm < 0 { + rt1 = 0.5 * (sm - rt) + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + return rt1, rt2 + } + if sm > 0 { + rt1 = 0.5 * (sm + rt) + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + return rt1, rt2 + } + rt1 = 0.5 * rt + rt2 = -0.5 * rt + return rt1, rt2 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go new file mode 100644 index 00000000..74d75b91 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go @@ -0,0 +1,82 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaev2 computes the eigendecomposition of a symmetric 2×2 matrix. +// The matrix is given by +// [a b] +// [b c] +// Dlaev2 returns rt1 and rt2, the eigenvalues of the matrix where |RT1| > |RT2|, +// and [cs1, sn1] which is the unit right eigenvector for RT1. +// [ cs1 sn1] [a b] [cs1 -sn1] = [rt1 0] +// [-sn1 cs1] [b c] [sn1 cs1] [ 0 rt2] +// +// Dlaev2 is an internal routine. It is exported for testing purposes. 
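The eigenvector convention above can be verified directly: v = [cs1, sn1] should satisfy A*v = rt1*v. An illustrative check for the symmetric matrix [2 1; 1 3], again assuming an Implementation value impl:

rt1, rt2, cs1, sn1 := impl.Dlaev2(2, 1, 3)
// Residual of A*v - rt1*v; both components should be ~0 up to rounding.
r0 := 2*cs1 + 1*sn1 - rt1*cs1
r1 := 1*cs1 + 3*sn1 - rt1*sn1
fmt.Println(rt1, rt2, r0, r1) // rt1 ≈ 3.618, rt2 ≈ 1.382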
+func (impl Implementation) Dlaev2(a, b, c float64) (rt1, rt2, cs1, sn1 float64) { + sm := a + c + df := a - c + adf := math.Abs(df) + tb := b + b + ab := math.Abs(tb) + acmx := c + acmn := a + if math.Abs(a) > math.Abs(c) { + acmx = a + acmn = c + } + var rt float64 + if adf > ab { + rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) + } else if adf < ab { + rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) + } else { + rt = ab * math.Sqrt(2) + } + var sgn1 float64 + if sm < 0 { + rt1 = 0.5 * (sm - rt) + sgn1 = -1 + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + } else if sm > 0 { + rt1 = 0.5 * (sm + rt) + sgn1 = 1 + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + } else { + rt1 = 0.5 * rt + rt2 = -0.5 * rt + sgn1 = 1 + } + var cs, sgn2 float64 + if df >= 0 { + cs = df + rt + sgn2 = 1 + } else { + cs = df - rt + sgn2 = -1 + } + acs := math.Abs(cs) + if acs > ab { + ct := -tb / cs + sn1 = 1 / math.Sqrt(1+ct*ct) + cs1 = ct * sn1 + } else { + if ab == 0 { + cs1 = 1 + sn1 = 0 + } else { + tn := -cs / tb + cs1 = 1 / math.Sqrt(1+tn*tn) + sn1 = tn * cs1 + } + } + if sgn1 == sgn2 { + tn := cs1 + cs1 = -sn1 + sn1 = tn + } + return rt1, rt2, cs1, sn1 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go new file mode 100644 index 00000000..8ffe2eba --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go @@ -0,0 +1,261 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlaexc swaps two adjacent diagonal blocks of order 1 or 2 in an n×n upper +// quasi-triangular matrix T by an orthogonal similarity transformation. +// +// T must be in Schur canonical form, that is, block upper triangular with 1×1 +// and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal elements +// equal and its off-diagonal elements of opposite sign. On return, T will +// contain the updated matrix again in Schur canonical form. +// +// If wantq is true, the transformation is accumulated in the n×n matrix Q, +// otherwise Q is not referenced. +// +// j1 is the index of the first row of the first block. n1 and n2 are the order +// of the first and second block, respectively. +// +// work must have length at least n, otherwise Dlaexc will panic. +// +// If ok is false, the transformed matrix T would be too far from Schur form. +// The blocks are not swapped, and T and Q are not modified. +// +// If n1 and n2 are both equal to 1, Dlaexc will always return true. +// +// Dlaexc is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaexc(wantq bool, n int, t []float64, ldt int, q []float64, ldq int, j1, n1, n2 int, work []float64) (ok bool) { + checkMatrix(n, n, t, ldt) + if wantq { + checkMatrix(n, n, q, ldq) + } + if j1 < 0 || n <= j1 { + panic("lapack: index j1 out of range") + } + if len(work) < n { + panic(badWork) + } + if n1 < 0 || 2 < n1 { + panic("lapack: invalid value of n1") + } + if n2 < 0 || 2 < n2 { + panic("lapack: invalid value of n2") + } + + if n == 0 || n1 == 0 || n2 == 0 { + return true + } + if j1+n1 >= n { + // TODO(vladimir-ch): Reference LAPACK does this check whether + // the start of the second block is in the matrix T. 
It returns + // true if it is not and moreover it does not check whether the + // whole second block fits into T. This does not feel + // satisfactory. The only caller of Dlaexc is Dtrexc, so if the + // caller makes sure that this does not happen, we could be + // stricter here. + return true + } + + j2 := j1 + 1 + j3 := j1 + 2 + + bi := blas64.Implementation() + + if n1 == 1 && n2 == 1 { + // Swap two 1×1 blocks. + t11 := t[j1*ldt+j1] + t22 := t[j2*ldt+j2] + + // Determine the transformation to perform the interchange. + cs, sn, _ := impl.Dlartg(t[j1*ldt+j2], t22-t11) + + // Apply transformation to the matrix T. + if n-j3 > 0 { + bi.Drot(n-j3, t[j1*ldt+j3:], 1, t[j2*ldt+j3:], 1, cs, sn) + } + if j1 > 0 { + bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) + } + + t[j1*ldt+j1] = t22 + t[j2*ldt+j2] = t11 + + if wantq { + // Accumulate transformation in the matrix Q. + bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) + } + + return true + } + + // Swapping involves at least one 2×2 block. + // + // Copy the diagonal block of order n1+n2 to the local array d and + // compute its norm. + nd := n1 + n2 + var d [16]float64 + const ldd = 4 + impl.Dlacpy(blas.All, nd, nd, t[j1*ldt+j1:], ldt, d[:], ldd) + dnorm := impl.Dlange(lapack.MaxAbs, nd, nd, d[:], ldd, work) + + // Compute machine-dependent threshold for test for accepting swap. + eps := dlamchP + thresh := math.Max(10*eps*dnorm, dlamchS/eps) + + // Solve T11*X - X*T22 = scale*T12 for X. + var x [4]float64 + const ldx = 2 + scale, _, _ := impl.Dlasy2(false, false, -1, n1, n2, d[:], ldd, d[n1*ldd+n1:], ldd, d[n1:], ldd, x[:], ldx) + + // Swap the adjacent diagonal blocks. + switch { + case n1 == 1 && n2 == 2: + // Generate elementary reflector H so that + // ( scale, X11, X12 ) H = ( 0, 0, * ) + u := [3]float64{scale, x[0], 1} + _, tau := impl.Dlarfg(3, x[1], u[:2], 1) + t11 := t[j1*ldt+j1] + + // Perform swap provisionally on diagonal block in d. + impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) + impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) + + // Test whether to reject swap. + if math.Max(math.Abs(d[2*ldd]), math.Max(math.Abs(d[2*ldd+1]), math.Abs(d[2*ldd+2]-t11))) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + impl.Dlarfx(blas.Left, 3, n-j1, u[:], tau, t[j1*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j2+1, 3, u[:], tau, t[j1:], ldt, work) + + t[j3*ldt+j1] = 0 + t[j3*ldt+j2] = 0 + t[j3*ldt+j3] = t11 + + if wantq { + // Accumulate transformation in the matrix Q. + impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) + } + + case n1 == 2 && n2 == 1: + // Generate elementary reflector H so that: + // H ( -X11 ) = ( * ) + // ( -X21 ) = ( 0 ) + // ( scale ) = ( 0 ) + u := [3]float64{1, -x[ldx], scale} + _, tau := impl.Dlarfg(3, -x[0], u[1:], 1) + t33 := t[j3*ldt+j3] + + // Perform swap provisionally on diagonal block in D. + impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) + impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) + + // Test whether to reject swap. + if math.Max(math.Abs(d[ldd]), math.Max(math.Abs(d[2*ldd]), math.Abs(d[0]-t33))) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + impl.Dlarfx(blas.Right, j3+1, 3, u[:], tau, t[j1:], ldt, work) + impl.Dlarfx(blas.Left, 3, n-j1-1, u[:], tau, t[j1*ldt+j2:], ldt, work) + + t[j1*ldt+j1] = t33 + t[j2*ldt+j1] = 0 + t[j3*ldt+j1] = 0 + + if wantq { + // Accumulate transformation in the matrix Q. 
+ impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) + } + + default: // n1 == 2 && n2 == 2 + // Generate elementary reflectors H_1 and H_2 so that: + // H_2 H_1 ( -X11 -X12 ) = ( * * ) + // ( -X21 -X22 ) ( 0 * ) + // ( scale 0 ) ( 0 0 ) + // ( 0 scale ) ( 0 0 ) + u1 := [3]float64{1, -x[ldx], scale} + _, tau1 := impl.Dlarfg(3, -x[0], u1[1:], 1) + + temp := -tau1 * (x[1] + u1[1]*x[ldx+1]) + u2 := [3]float64{1, -temp * u1[2], scale} + _, tau2 := impl.Dlarfg(3, -temp*u1[1]-x[ldx+1], u2[1:], 1) + + // Perform swap provisionally on diagonal block in D. + impl.Dlarfx(blas.Left, 3, 4, u1[:], tau1, d[:], ldd, work) + impl.Dlarfx(blas.Right, 4, 3, u1[:], tau1, d[:], ldd, work) + impl.Dlarfx(blas.Left, 3, 4, u2[:], tau2, d[ldd:], ldd, work) + impl.Dlarfx(blas.Right, 4, 3, u2[:], tau2, d[1:], ldd, work) + + // Test whether to reject swap. + m1 := math.Max(math.Abs(d[2*ldd]), math.Abs(d[2*ldd+1])) + m2 := math.Max(math.Abs(d[3*ldd]), math.Abs(d[3*ldd+1])) + if math.Max(m1, m2) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + j4 := j1 + 3 + impl.Dlarfx(blas.Left, 3, n-j1, u1[:], tau1, t[j1*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j4+1, 3, u1[:], tau1, t[j1:], ldt, work) + impl.Dlarfx(blas.Left, 3, n-j1, u2[:], tau2, t[j2*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j4+1, 3, u2[:], tau2, t[j2:], ldt, work) + + t[j3*ldt+j1] = 0 + t[j3*ldt+j2] = 0 + t[j4*ldt+j1] = 0 + t[j4*ldt+j2] = 0 + + if wantq { + // Accumulate transformation in the matrix Q. + impl.Dlarfx(blas.Right, n, 3, u1[:], tau1, q[j1:], ldq, work) + impl.Dlarfx(blas.Right, n, 3, u2[:], tau2, q[j2:], ldq, work) + } + } + + if n2 == 2 { + // Standardize new 2×2 block T11. + a, b := t[j1*ldt+j1], t[j1*ldt+j2] + c, d := t[j2*ldt+j1], t[j2*ldt+j2] + var cs, sn float64 + t[j1*ldt+j1], t[j1*ldt+j2], t[j2*ldt+j1], t[j2*ldt+j2], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) + if n-j1-2 > 0 { + bi.Drot(n-j1-2, t[j1*ldt+j1+2:], 1, t[j2*ldt+j1+2:], 1, cs, sn) + } + if j1 > 0 { + bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) + } + if wantq { + bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) + } + } + if n1 == 2 { + // Standardize new 2×2 block T22. + j3 := j1 + n2 + j4 := j3 + 1 + a, b := t[j3*ldt+j3], t[j3*ldt+j4] + c, d := t[j4*ldt+j3], t[j4*ldt+j4] + var cs, sn float64 + t[j3*ldt+j3], t[j3*ldt+j4], t[j4*ldt+j3], t[j4*ldt+j4], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) + if n-j3-2 > 0 { + bi.Drot(n-j3-2, t[j3*ldt+j3+2:], 1, t[j4*ldt+j3+2:], 1, cs, sn) + } + bi.Drot(j3, t[j3:], ldt, t[j4:], ldt, cs, sn) + if wantq { + bi.Drot(n, q[j3:], ldq, q[j4:], ldq, cs, sn) + } + } + + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go new file mode 100644 index 00000000..6954deb4 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go @@ -0,0 +1,182 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlags2 computes 2-by-2 orthogonal matrices U, V and Q with the +// triangles of A and B specified by upper. 
+// +// If upper is true +// +// U^T*A*Q = U^T*[ a1 a2 ]*Q = [ x 0 ] +// [ 0 a3 ] [ x x ] +// and +// V^T*B*Q = V^T*[ b1 b2 ]*Q = [ x 0 ] +// [ 0 b3 ] [ x x ] +// +// otherwise +// +// U^T*A*Q = U^T*[ a1 0 ]*Q = [ x x ] +// [ a2 a3 ] [ 0 x ] +// and +// V^T*B*Q = V^T*[ b1 0 ]*Q = [ x x ] +// [ b2 b3 ] [ 0 x ]. +// +// The rows of the transformed A and B are parallel, where +// +// U = [ csu snu ], V = [ csv snv ], Q = [ csq snq ] +// [ -snu csu ] [ -snv csv ] [ -snq csq ] +// +// Dlags2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlags2(upper bool, a1, a2, a3, b1, b2, b3 float64) (csu, snu, csv, snv, csq, snq float64) { + if upper { + // Input matrices A and B are upper triangular matrices. + // + // Form matrix C = A*adj(B) = [ a b ] + // [ 0 d ] + a := a1 * b3 + d := a3 * b1 + b := a2*b1 - a1*b2 + + // The SVD of real 2-by-2 triangular C. + // + // [ csl -snl ]*[ a b ]*[ csr snr ] = [ r 0 ] + // [ snl csl ] [ 0 d ] [ -snr csr ] [ 0 t ] + _, _, snr, csr, snl, csl := impl.Dlasv2(a, b, d) + + if math.Abs(csl) >= math.Abs(snl) || math.Abs(csr) >= math.Abs(snr) { + // Compute the [0, 0] and [0, 1] elements of U^T*A and V^T*B, + // and [0, 1] element of |U|^T*|A| and |V|^T*|B|. + + ua11r := csl * a1 + ua12 := csl*a2 + snl*a3 + + vb11r := csr * b1 + vb12 := csr*b2 + snr*b3 + + aua12 := math.Abs(csl)*math.Abs(a2) + math.Abs(snl)*math.Abs(a3) + avb12 := math.Abs(csr)*math.Abs(b2) + math.Abs(snr)*math.Abs(b3) + + // Zero [0, 1] elements of U^T*A and V^T*B. + if math.Abs(ua11r)+math.Abs(ua12) != 0 { + if aua12/(math.Abs(ua11r)+math.Abs(ua12)) <= avb12/(math.Abs(vb11r)+math.Abs(vb12)) { + csq, snq, _ = impl.Dlartg(-ua11r, ua12) + } else { + csq, snq, _ = impl.Dlartg(-vb11r, vb12) + } + } else { + csq, snq, _ = impl.Dlartg(-vb11r, vb12) + } + + csu = csl + snu = -snl + csv = csr + snv = -snr + } else { + // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, + // and [1, 1] element of |U|^T*|A| and |V|^T*|B|. + + ua21 := -snl * a1 + ua22 := -snl*a2 + csl*a3 + + vb21 := -snr * b1 + vb22 := -snr*b2 + csr*b3 + + aua22 := math.Abs(snl)*math.Abs(a2) + math.Abs(csl)*math.Abs(a3) + avb22 := math.Abs(snr)*math.Abs(b2) + math.Abs(csr)*math.Abs(b3) + + // Zero [1, 1] elements of U^T*A and V^T*B, and then swap. + if math.Abs(ua21)+math.Abs(ua22) != 0 { + if aua22/(math.Abs(ua21)+math.Abs(ua22)) <= avb22/(math.Abs(vb21)+math.Abs(vb22)) { + csq, snq, _ = impl.Dlartg(-ua21, ua22) + } else { + csq, snq, _ = impl.Dlartg(-vb21, vb22) + } + } else { + csq, snq, _ = impl.Dlartg(-vb21, vb22) + } + + csu = snl + snu = csl + csv = snr + snv = csr + } + } else { + // Input matrices A and B are lower triangular matrices + // + // Form matrix C = A*adj(B) = [ a 0 ] + // [ c d ] + a := a1 * b3 + d := a3 * b1 + c := a2*b3 - a3*b2 + + // The SVD of real 2-by-2 triangular C + // + // [ csl -snl ]*[ a 0 ]*[ csr snr ] = [ r 0 ] + // [ snl csl ] [ c d ] [ -snr csr ] [ 0 t ] + _, _, snr, csr, snl, csl := impl.Dlasv2(a, c, d) + + if math.Abs(csr) >= math.Abs(snr) || math.Abs(csl) >= math.Abs(snl) { + // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, + // and [1, 0] element of |U|^T*|A| and |V|^T*|B|. + + ua21 := -snr*a1 + csr*a2 + ua22r := csr * a3 + + vb21 := -snl*b1 + csl*b2 + vb22r := csl * b3 + + aua21 := math.Abs(snr)*math.Abs(a1) + math.Abs(csr)*math.Abs(a2) + avb21 := math.Abs(snl)*math.Abs(b1) + math.Abs(csl)*math.Abs(b2) + + // Zero [1, 0] elements of U^T*A and V^T*B. 
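+			// The rotation [csq snq; -snq csq] is taken from whichever of
+			// the rows of U^T*A and V^T*B can be annihilated with the
+			// smaller relative error, estimated by the ratios formed from
+			// aua21 and avb21 above.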
+ if (math.Abs(ua21) + math.Abs(ua22r)) != 0 { + if aua21/(math.Abs(ua21)+math.Abs(ua22r)) <= avb21/(math.Abs(vb21)+math.Abs(vb22r)) { + csq, snq, _ = impl.Dlartg(ua22r, ua21) + } else { + csq, snq, _ = impl.Dlartg(vb22r, vb21) + } + } else { + csq, snq, _ = impl.Dlartg(vb22r, vb21) + } + + csu = csr + snu = -snr + csv = csl + snv = -snl + } else { + // Compute the [0, 0] and [0, 1] elements of U^T *A and V^T *B, + // and [0, 0] element of |U|^T*|A| and |V|^T*|B|. + + ua11 := csr*a1 + snr*a2 + ua12 := snr * a3 + + vb11 := csl*b1 + snl*b2 + vb12 := snl * b3 + + aua11 := math.Abs(csr)*math.Abs(a1) + math.Abs(snr)*math.Abs(a2) + avb11 := math.Abs(csl)*math.Abs(b1) + math.Abs(snl)*math.Abs(b2) + + // Zero [0, 0] elements of U^T*A and V^T*B, and then swap. + if (math.Abs(ua11) + math.Abs(ua12)) != 0 { + if aua11/(math.Abs(ua11)+math.Abs(ua12)) <= avb11/(math.Abs(vb11)+math.Abs(vb12)) { + csq, snq, _ = impl.Dlartg(ua12, ua11) + } else { + csq, snq, _ = impl.Dlartg(vb12, vb11) + } + } else { + csq, snq, _ = impl.Dlartg(vb12, vb11) + } + + csu = snr + snu = csr + csv = snl + snv = csl + } + } + + return csu, snu, csv, snv, csq, snq +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go new file mode 100644 index 00000000..66330ab8 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go @@ -0,0 +1,423 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlahqr computes the eigenvalues and Schur factorization of a block of an n×n +// upper Hessenberg matrix H, using the double-shift/single-shift QR algorithm. +// +// h and ldh represent the matrix H. Dlahqr works primarily with the Hessenberg +// submatrix H[ilo:ihi+1,ilo:ihi+1], but applies transformations to all of H if +// wantt is true. It is assumed that H[ihi+1:n,ihi+1:n] is already upper +// quasi-triangular, although this is not checked. +// +// It must hold that +// 0 <= ilo <= max(0,ihi), and ihi < n, +// and that +// H[ilo,ilo-1] == 0, if ilo > 0, +// otherwise Dlahqr will panic. +// +// If unconverged is zero on return, wr[ilo:ihi+1] and wi[ilo:ihi+1] will contain +// respectively the real and imaginary parts of the computed eigenvalues ilo +// to ihi. If two eigenvalues are computed as a complex conjugate pair, they are +// stored in consecutive elements of wr and wi, say the i-th and (i+1)th, with +// wi[i] > 0 and wi[i+1] < 0. If wantt is true, the eigenvalues are stored in +// the same order as on the diagonal of the Schur form returned in H, with +// wr[i] = H[i,i], and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, +// wi[i] = sqrt(abs(H[i+1,i]*H[i,i+1])) and wi[i+1] = -wi[i]. +// +// wr and wi must have length ihi+1. +// +// z and ldz represent an n×n matrix Z. If wantz is true, the transformations +// will be applied to the submatrix Z[iloz:ihiz+1,ilo:ihi+1] and it must hold that +// 0 <= iloz <= ilo, and ihi <= ihiz < n. +// If wantz is false, z is not referenced. +// +// unconverged indicates whether Dlahqr computed all the eigenvalues ilo to ihi +// in a total of 30 iterations per eigenvalue. +// +// If unconverged is zero, all the eigenvalues ilo to ihi have been computed and +// will be stored on return in wr[ilo:ihi+1] and wi[ilo:ihi+1]. 
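+// For example, under this convention a conjugate pair 2 ± 3i computed at
+// positions i and i+1 is returned as wr[i] = wr[i+1] = 2, wi[i] = 3 and
+// wi[i+1] = -3.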
+// +// If unconverged is zero and wantt is true, H[ilo:ihi+1,ilo:ihi+1] will be +// overwritten on return by upper quasi-triangular full Schur form with any +// 2×2 diagonal blocks in standard form. +// +// If unconverged is zero and if wantt is false, the contents of h on return is +// unspecified. +// +// If unconverged is positive, some eigenvalues have not converged, and +// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] contain those eigenvalues +// which have been successfully computed. +// +// If unconverged is positive and wantt is true, then on return +// (initial H)*U = U*(final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged is positive and wantt is false, on return the remaining +// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix +// H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged is positive and wantz is true, then on return +// (final Z) = (initial Z)*U, +// where U is the orthogonal matrix in (*) regardless of the value of wantt. +// +// Dlahqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlahqr(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int) (unconverged int) { + checkMatrix(n, n, h, ldh) + switch { + case ilo < 0 || max(0, ihi) < ilo: + panic(badIlo) + case n <= ihi: + panic(badIhi) + case len(wr) != ihi+1: + panic("lapack: bad length of wr") + case len(wi) != ihi+1: + panic("lapack: bad length of wi") + case ilo > 0 && h[ilo*ldh+ilo-1] != 0: + panic("lapack: block is not isolated") + } + if wantz { + checkMatrix(n, n, z, ldz) + switch { + case iloz < 0 || ilo < iloz: + panic("lapack: iloz out of range") + case ihiz < ihi || n <= ihiz: + panic("lapack: ihiz out of range") + } + } + + // Quick return if possible. + if n == 0 { + return 0 + } + if ilo == ihi { + wr[ilo] = h[ilo*ldh+ilo] + wi[ilo] = 0 + return 0 + } + + // Clear out the trash. + for j := ilo; j < ihi-2; j++ { + h[(j+2)*ldh+j] = 0 + h[(j+3)*ldh+j] = 0 + } + if ilo <= ihi-2 { + h[ihi*ldh+ihi-2] = 0 + } + + nh := ihi - ilo + 1 + nz := ihiz - iloz + 1 + + // Set machine-dependent constants for the stopping criterion. + ulp := dlamchP + smlnum := float64(nh) / ulp * dlamchS + + // i1 and i2 are the indices of the first row and last column of H to + // which transformations must be applied. If eigenvalues only are being + // computed, i1 and i2 are set inside the main loop. + var i1, i2 int + if wantt { + i1 = 0 + i2 = n - 1 + } + + itmax := 30 * max(10, nh) // Total number of QR iterations allowed. + + // The main loop begins here. i is the loop index and decreases from ihi + // to ilo in steps of 1 or 2. Each iteration of the loop works with the + // active submatrix in rows and columns l to i. Eigenvalues i+1 to ihi + // have already converged. Either l = ilo or H[l,l-1] is negligible so + // that the matrix splits. + bi := blas64.Implementation() + i := ihi + for i >= ilo { + l := ilo + + // Perform QR iterations on rows and columns ilo to i until a + // submatrix of order 1 or 2 splits off at the bottom because a + // subdiagonal element has become negligible. + converged := false + for its := 0; its <= itmax; its++ { + // Look for a single small subdiagonal element. 
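+			// The search stops as soon as |H[k,k-1]| drops below the
+			// absolute threshold smlnum, or satisfies the relative
+			// Ahues & Tisseur criterion applied below.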
+ var k int + for k = i; k > l; k-- { + if math.Abs(h[k*ldh+k-1]) <= smlnum { + break + } + tst := math.Abs(h[(k-1)*ldh+k-1]) + math.Abs(h[k*ldh+k]) + if tst == 0 { + if k-2 >= ilo { + tst += math.Abs(h[(k-1)*ldh+k-2]) + } + if k+1 <= ihi { + tst += math.Abs(h[(k+1)*ldh+k]) + } + } + // The following is a conservative small + // subdiagonal deflation criterion due to Ahues + // & Tisseur (LAWN 122, 1997). It has better + // mathematical foundation and improves accuracy + // in some cases. + if math.Abs(h[k*ldh+k-1]) <= ulp*tst { + ab := math.Max(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) + ba := math.Min(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) + aa := math.Max(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) + bb := math.Min(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) + s := aa + ab + if ab/s*ba <= math.Max(smlnum, aa/s*bb*ulp) { + break + } + } + } + l = k + if l > ilo { + // H[l,l-1] is negligible. + h[l*ldh+l-1] = 0 + } + if l >= i-1 { + // Break the loop because a submatrix of order 1 + // or 2 has split off. + converged = true + break + } + + // Now the active submatrix is in rows and columns l to + // i. If eigenvalues only are being computed, only the + // active submatrix need be transformed. + if !wantt { + i1 = l + i2 = i + } + + const ( + dat1 = 3.0 + dat2 = -0.4375 + ) + var h11, h21, h12, h22 float64 + switch its { + case 10: // Exceptional shift. + s := math.Abs(h[(l+1)*ldh+l]) + math.Abs(h[(l+2)*ldh+l+1]) + h11 = dat1*s + h[l*ldh+l] + h12 = dat2 * s + h21 = s + h22 = h11 + case 20: // Exceptional shift. + s := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) + h11 = dat1*s + h[i*ldh+i] + h12 = dat2 * s + h21 = s + h22 = h11 + default: // Prepare to use Francis' double shift (i.e., + // 2nd degree generalized Rayleigh quotient). + h11 = h[(i-1)*ldh+i-1] + h21 = h[i*ldh+i-1] + h12 = h[(i-1)*ldh+i] + h22 = h[i*ldh+i] + } + s := math.Abs(h11) + math.Abs(h12) + math.Abs(h21) + math.Abs(h22) + var ( + rt1r, rt1i float64 + rt2r, rt2i float64 + ) + if s != 0 { + h11 /= s + h21 /= s + h12 /= s + h22 /= s + tr := (h11 + h22) / 2 + det := (h11-tr)*(h22-tr) - h12*h21 + rtdisc := math.Sqrt(math.Abs(det)) + if det >= 0 { + // Complex conjugate shifts. + rt1r = tr * s + rt2r = rt1r + rt1i = rtdisc * s + rt2i = -rt1i + } else { + // Real shifts (use only one of them). + rt1r = tr + rtdisc + rt2r = tr - rtdisc + if math.Abs(rt1r-h22) <= math.Abs(rt2r-h22) { + rt1r *= s + rt2r = rt1r + } else { + rt2r *= s + rt1r = rt2r + } + rt1i = 0 + rt2i = 0 + } + } + + // Look for two consecutive small subdiagonal elements. + var m int + var v [3]float64 + for m = i - 2; m >= l; m-- { + // Determine the effect of starting the + // double-shift QR iteration at row m, and see + // if this would make H[m,m-1] negligible. The + // following uses scaling to avoid overflows and + // most underflows. + h21s := h[(m+1)*ldh+m] + s := math.Abs(h[m*ldh+m]-rt2r) + math.Abs(rt2i) + math.Abs(h21s) + h21s /= s + v[0] = h21s*h[m*ldh+m+1] + (h[m*ldh+m]-rt1r)*((h[m*ldh+m]-rt2r)/s) - rt2i/s*rt1i + v[1] = h21s * (h[m*ldh+m] + h[(m+1)*ldh+m+1] - rt1r - rt2r) + v[2] = h21s * h[(m+2)*ldh+m+1] + s = math.Abs(v[0]) + math.Abs(v[1]) + math.Abs(v[2]) + v[0] /= s + v[1] /= s + v[2] /= s + if m == l { + break + } + dsum := math.Abs(h[(m-1)*ldh+m-1]) + math.Abs(h[m*ldh+m]) + math.Abs(h[(m+1)*ldh+m+1]) + if math.Abs(h[m*ldh+m-1])*(math.Abs(v[1])+math.Abs(v[2])) <= ulp*math.Abs(v[0])*dsum { + break + } + } + + // Double-shift QR step. 
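+			// At this point v holds, up to scaling, the first three entries
+			// of the first column of (H-rt1*I)*(H-rt2*I) starting at row m,
+			// from which the initial reflector of the implicit double-shift
+			// step is constructed.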
+ for k := m; k < i; k++ { + // The first iteration of this loop determines a + // reflection G from the vector V and applies it + // from left and right to H, thus creating a + // non-zero bulge below the subdiagonal. + // + // Each subsequent iteration determines a + // reflection G to restore the Hessenberg form + // in the (k-1)th column, and thus chases the + // bulge one step toward the bottom of the + // active submatrix. nr is the order of G. + + nr := min(3, i-k+1) + if k > m { + bi.Dcopy(nr, h[k*ldh+k-1:], ldh, v[:], 1) + } + var t0 float64 + v[0], t0 = impl.Dlarfg(nr, v[0], v[1:], 1) + if k > m { + h[k*ldh+k-1] = v[0] + h[(k+1)*ldh+k-1] = 0 + if k < i-1 { + h[(k+2)*ldh+k-1] = 0 + } + } else if m > l { + // Use the following instead of H[k,k-1] = -H[k,k-1] + // to avoid a bug when v[1] and v[2] underflow. + h[k*ldh+k-1] *= 1 - t0 + } + t1 := t0 * v[1] + if nr == 3 { + t2 := t0 * v[2] + + // Apply G from the left to transform + // the rows of the matrix in columns k + // to i2. + for j := k; j <= i2; j++ { + sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] + v[2]*h[(k+2)*ldh+j] + h[k*ldh+j] -= sum * t0 + h[(k+1)*ldh+j] -= sum * t1 + h[(k+2)*ldh+j] -= sum * t2 + } + + // Apply G from the right to transform + // the columns of the matrix in rows i1 + // to min(k+3,i). + for j := i1; j <= min(k+3, i); j++ { + sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] + v[2]*h[j*ldh+k+2] + h[j*ldh+k] -= sum * t0 + h[j*ldh+k+1] -= sum * t1 + h[j*ldh+k+2] -= sum * t2 + } + + if wantz { + // Accumulate transformations in the matrix Z. + for j := iloz; j <= ihiz; j++ { + sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] + v[2]*z[j*ldz+k+2] + z[j*ldz+k] -= sum * t0 + z[j*ldz+k+1] -= sum * t1 + z[j*ldz+k+2] -= sum * t2 + } + } + } else if nr == 2 { + // Apply G from the left to transform + // the rows of the matrix in columns k + // to i2. + for j := k; j <= i2; j++ { + sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] + h[k*ldh+j] -= sum * t0 + h[(k+1)*ldh+j] -= sum * t1 + } + + // Apply G from the right to transform + // the columns of the matrix in rows i1 + // to min(k+3,i). + for j := i1; j <= i; j++ { + sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] + h[j*ldh+k] -= sum * t0 + h[j*ldh+k+1] -= sum * t1 + } + + if wantz { + // Accumulate transformations in the matrix Z. + for j := iloz; j <= ihiz; j++ { + sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] + z[j*ldz+k] -= sum * t0 + z[j*ldz+k+1] -= sum * t1 + } + } + } + } + } + + if !converged { + // The QR iteration finished without splitting off a + // submatrix of order 1 or 2. + return i + 1 + } + + if l == i { + // H[i,i-1] is negligible: one eigenvalue has converged. + wr[i] = h[i*ldh+i] + wi[i] = 0 + } else if l == i-1 { + // H[i-1,i-2] is negligible: a pair of eigenvalues have converged. + + // Transform the 2×2 submatrix to standard Schur form, + // and compute and store the eigenvalues. + var cs, sn float64 + a, b := h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] + c, d := h[i*ldh+i-1], h[i*ldh+i] + a, b, c, d, wr[i-1], wi[i-1], wr[i], wi[i], cs, sn = impl.Dlanv2(a, b, c, d) + h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] = a, b + h[i*ldh+i-1], h[i*ldh+i] = c, d + + if wantt { + // Apply the transformation to the rest of H. + if i2 > i { + bi.Drot(i2-i, h[(i-1)*ldh+i+1:], 1, h[i*ldh+i+1:], 1, cs, sn) + } + bi.Drot(i-i1-1, h[i1*ldh+i-1:], ldh, h[i1*ldh+i:], ldh, cs, sn) + } + + if wantz { + // Apply the transformation to Z. + bi.Drot(nz, z[iloz*ldz+i-1:], ldz, z[iloz*ldz+i:], ldz, cs, sn) + } + } + + // Return to start of the main loop with new value of i. 
+ i = l - 1 + } + return 0 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go new file mode 100644 index 00000000..9d23bc06 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go @@ -0,0 +1,169 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlahr2 reduces the first nb columns of a real general n×(n-k+1) matrix A so +// that elements below the k-th subdiagonal are zero. The reduction is performed +// by an orthogonal similarity transformation Q^T * A * Q. Dlahr2 returns the +// matrices V and T which determine Q as a block reflector I - V*T*V^T, and +// also the matrix Y = A * V * T. +// +// The matrix Q is represented as a product of nb elementary reflectors +// Q = H_0 * H_1 * ... * H_{nb-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T, +// where v is a real vector with v[0:i+k-1] = 0 and v[i+k-1] = 1. v[i+k:n] is +// stored on exit in A[i+k+1:n,i]. +// +// The elements of the vectors v together form the (n-k+1)×nb matrix +// V which is needed, with T and Y, to apply the transformation to the +// unreduced part of the matrix, using an update of the form +// A = (I - V*T*V^T) * (A - Y*V^T). +// +// On entry, a contains the n×(n-k+1) general matrix A. On return, the elements +// on and above the k-th subdiagonal in the first nb columns are overwritten +// with the corresponding elements of the reduced matrix; the elements below the +// k-th subdiagonal, with the slice tau, represent the matrix Q as a product of +// elementary reflectors. The other columns of A are unchanged. +// +// The contents of A on exit are illustrated by the following example +// with n = 7, k = 3 and nb = 2: +// [ a a a a a ] +// [ a a a a a ] +// [ a a a a a ] +// [ h h a a a ] +// [ v0 h a a a ] +// [ v0 v1 a a a ] +// [ v0 v1 a a a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// k is the offset for the reduction. Elements below the k-th subdiagonal in the +// first nb columns are reduced to zero. +// +// nb is the number of columns to be reduced. +// +// On entry, a represents the n×(n-k+1) matrix A. On return, the elements on and +// above the k-th subdiagonal in the first nb columns are overwritten with the +// corresponding elements of the reduced matrix. The elements below the k-th +// subdiagonal, with the slice tau, represent the matrix Q as a product of +// elementary reflectors. The other columns of A are unchanged. +// +// tau will contain the scalar factors of the elementary reflectors. It must +// have length at least nb. +// +// t and ldt represent the nb×nb upper triangular matrix T, and y and ldy +// represent the n×nb matrix Y. +// +// Dlahr2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlahr2(n, k, nb int, a []float64, lda int, tau, t []float64, ldt int, y []float64, ldy int) { + checkMatrix(n, n-k+1, a, lda) + if len(tau) < nb { + panic(badTau) + } + checkMatrix(nb, nb, t, ldt) + checkMatrix(n, nb, y, ldy) + + // Quick return if possible. 
+ if n <= 1 { + return + } + + bi := blas64.Implementation() + var ei float64 + for i := 0; i < nb; i++ { + if i > 0 { + // Update A[k:n,i]. + + // Update i-th column of A - Y * V^T. + bi.Dgemv(blas.NoTrans, n-k, i, + -1, y[k*ldy:], ldy, + a[(k+i-1)*lda:], 1, + 1, a[k*lda+i:], lda) + + // Apply I - V * T^T * V^T to this column (call it b) + // from the left, using the last column of T as + // workspace. + // Let V = [ V1 ] and b = [ b1 ] (first i rows) + // [ V2 ] [ b2 ] + // where V1 is unit lower triangular. + // + // w := V1^T * b1. + bi.Dcopy(i, a[k*lda+i:], lda, t[nb-1:], ldt) + bi.Dtrmv(blas.Lower, blas.Trans, blas.Unit, i, + a[k*lda:], lda, t[nb-1:], ldt) + + // w := w + V2^T * b2. + bi.Dgemv(blas.Trans, n-k-i, i, + 1, a[(k+i)*lda:], lda, + a[(k+i)*lda+i:], lda, + 1, t[nb-1:], ldt) + + // w := T^T * w. + bi.Dtrmv(blas.Upper, blas.Trans, blas.NonUnit, i, + t, ldt, t[nb-1:], ldt) + + // b2 := b2 - V2*w. + bi.Dgemv(blas.NoTrans, n-k-i, i, + -1, a[(k+i)*lda:], lda, + t[nb-1:], ldt, + 1, a[(k+i)*lda+i:], lda) + + // b1 := b1 - V1*w. + bi.Dtrmv(blas.Lower, blas.NoTrans, blas.Unit, i, + a[k*lda:], lda, t[nb-1:], ldt) + bi.Daxpy(i, -1, t[nb-1:], ldt, a[k*lda+i:], lda) + + a[(k+i-1)*lda+i-1] = ei + } + + // Generate the elementary reflector H_i to annihilate + // A[k+i+1:n,i]. + ei, tau[i] = impl.Dlarfg(n-k-i, a[(k+i)*lda+i], a[min(k+i+1, n-1)*lda+i:], lda) + a[(k+i)*lda+i] = 1 + + // Compute Y[k:n,i]. + bi.Dgemv(blas.NoTrans, n-k, n-k-i, + 1, a[k*lda+i+1:], lda, + a[(k+i)*lda+i:], lda, + 0, y[k*ldy+i:], ldy) + bi.Dgemv(blas.Trans, n-k-i, i, + 1, a[(k+i)*lda:], lda, + a[(k+i)*lda+i:], lda, + 0, t[i:], ldt) + bi.Dgemv(blas.NoTrans, n-k, i, + -1, y[k*ldy:], ldy, + t[i:], ldt, + 1, y[k*ldy+i:], ldy) + bi.Dscal(n-k, tau[i], y[k*ldy+i:], ldy) + + // Compute T[0:i,i]. + bi.Dscal(i, -tau[i], t[i:], ldt) + bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, + t, ldt, t[i:], ldt) + + t[i*ldt+i] = tau[i] + } + a[(k+nb-1)*lda+nb-1] = ei + + // Compute Y[0:k,0:nb]. + impl.Dlacpy(blas.All, k, nb, a[1:], lda, y, ldy) + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, k, nb, + 1, a[k*lda:], lda, y, ldy) + if n > k+nb { + bi.Dgemm(blas.NoTrans, blas.NoTrans, k, nb, n-k-nb, + 1, a[1+nb:], lda, + a[(k+nb)*lda:], lda, + 1, y, ldy) + } + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, k, nb, + 1, t, ldt, y, ldy) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go new file mode 100644 index 00000000..07cf5213 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go @@ -0,0 +1,396 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaln2 solves a linear equation or a system of 2 linear equations of the form +// (ca A - w D) X = scale B, if trans == false, +// (ca A^T - w D) X = scale B, if trans == true, +// where A is a na×na real matrix, ca is a real scalar, D is a na×na diagonal +// real matrix, w is a scalar, real if nw == 1, complex if nw == 2, and X and B +// are na×1 matrices, real if w is real, complex if w is complex. +// +// If w is complex, X and B are represented as na×2 matrices, the first column +// of each being the real part and the second being the imaginary part. +// +// na and nw must be 1 or 2, otherwise Dlaln2 will panic. 
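+//
+// For example, with na == 2 and nw == 2, b holds a 2×2 real matrix in which
+// b[0] and b[ldb] are the real parts and b[1] and b[ldb+1] are the imaginary
+// parts of the two complex right-hand side entries.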
+// +// d1 and d2 are the diagonal elements of D. d2 is not used if na == 1. +// +// wr and wi represent the real and imaginary part, respectively, of the scalar +// w. wi is not used if nw == 1. +// +// smin is the desired lower bound on the singular values of A. This should be +// a safe distance away from underflow or overflow, say, between +// (underflow/machine precision) and (overflow*machine precision). +// +// If both singular values of (ca A - w D) are less than smin, smin*identity +// will be used instead of (ca A - w D). If only one singular value is less than +// smin, one element of (ca A - w D) will be perturbed enough to make the +// smallest singular value roughly smin. If both singular values are at least +// smin, (ca A - w D) will not be perturbed. In any case, the perturbation will +// be at most some small multiple of max(smin, ulp*norm(ca A - w D)). The +// singular values are computed by infinity-norm approximations, and thus will +// only be correct to a factor of 2 or so. +// +// All input quantities are assumed to be smaller than overflow by a reasonable +// factor. +// +// scale is a scaling factor less than or equal to 1 which is chosen so that X +// can be computed without overflow. X is further scaled if necessary to assure +// that norm(ca A - w D)*norm(X) is less than overflow. +// +// xnorm contains the infinity-norm of X when X is regarded as a na×nw real +// matrix. +// +// ok will be false if (ca A - w D) had to be perturbed to make its smallest +// singular value greater than smin, otherwise ok will be true. +// +// Dlaln2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaln2(trans bool, na, nw int, smin, ca float64, a []float64, lda int, d1, d2 float64, b []float64, ldb int, wr, wi float64, x []float64, ldx int) (scale, xnorm float64, ok bool) { + // TODO(vladimir-ch): Consider splitting this function into two, one + // handling the real case (nw == 1) and the other handling the complex + // case (nw == 2). Given that Go has complex types, their signatures + // would be simpler and more natural, and the implementation not as + // convoluted. + + if na != 1 && na != 2 { + panic("lapack: invalid value of na") + } + if nw != 1 && nw != 2 { + panic("lapack: invalid value of nw") + } + checkMatrix(na, na, a, lda) + checkMatrix(na, nw, b, ldb) + checkMatrix(na, nw, x, ldx) + + smlnum := 2 * dlamchS + bignum := 1 / smlnum + smini := math.Max(smin, smlnum) + + ok = true + scale = 1 + + if na == 1 { + // 1×1 (i.e., scalar) system C X = B. + + if nw == 1 { + // Real 1×1 system. + + // C = ca A - w D. + csr := ca*a[0] - wr*d1 + cnorm := math.Abs(csr) + + // If |C| < smini, use C = smini. + if cnorm < smini { + csr = smini + cnorm = smini + ok = false + } + + // Check scaling for X = B / C. + bnorm := math.Abs(b[0]) + if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { + scale = 1 / bnorm + } + + // Compute X. + x[0] = b[0] * scale / csr + xnorm = math.Abs(x[0]) + + return scale, xnorm, ok + } + + // Complex 1×1 system (w is complex). + + // C = ca A - w D. + csr := ca*a[0] - wr*d1 + csi := -wi * d1 + cnorm := math.Abs(csr) + math.Abs(csi) + + // If |C| < smini, use C = smini. + if cnorm < smini { + csr = smini + csi = 0 + cnorm = smini + ok = false + } + + // Check scaling for X = B / C. + bnorm := math.Abs(b[0]) + math.Abs(b[1]) + if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { + scale = 1 / bnorm + } + + // Compute X. 
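+		// Unlike the reference LAPACK routine, which calls DLADIV here,
+		// the division is carried out in Go's native complex128 arithmetic.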
+ cx := complex(scale*b[0], scale*b[1]) / complex(csr, csi) + x[0], x[1] = real(cx), imag(cx) + xnorm = math.Abs(x[0]) + math.Abs(x[1]) + + return scale, xnorm, ok + } + + // 2×2 system. + + // Compute the real part of + // C = ca A - w D + // or + // C = ca A^T - w D. + crv := [4]float64{ + ca*a[0] - wr*d1, + ca * a[1], + ca * a[lda], + ca*a[lda+1] - wr*d2, + } + if trans { + crv[1] = ca * a[lda] + crv[2] = ca * a[1] + } + + pivot := [4][4]int{ + {0, 1, 2, 3}, + {1, 0, 3, 2}, + {2, 3, 0, 1}, + {3, 2, 1, 0}, + } + + if nw == 1 { + // Real 2×2 system (w is real). + + // Find the largest element in C. + var cmax float64 + var icmax int + for j, v := range crv { + v = math.Abs(v) + if v > cmax { + cmax = v + icmax = j + } + } + + // If norm(C) < smini, use smini*identity. + if cmax < smini { + bnorm := math.Max(math.Abs(b[0]), math.Abs(b[ldb])) + if smini < 1 && bnorm > math.Max(1, bignum*smini) { + scale = 1 / bnorm + } + temp := scale / smini + x[0] = temp * b[0] + x[ldx] = temp * b[ldb] + xnorm = temp * bnorm + ok = false + + return scale, xnorm, ok + } + + // Gaussian elimination with complete pivoting. + // Form upper triangular matrix + // [ur11 ur12] + // [ 0 ur22] + ur11 := crv[icmax] + ur12 := crv[pivot[icmax][1]] + cr21 := crv[pivot[icmax][2]] + cr22 := crv[pivot[icmax][3]] + ur11r := 1 / ur11 + lr21 := ur11r * cr21 + ur22 := cr22 - ur12*lr21 + + // If smaller pivot < smini, use smini. + if math.Abs(ur22) < smini { + ur22 = smini + ok = false + } + + var br1, br2 float64 + if icmax > 1 { + // If the pivot lies in the second row, swap the rows. + br1 = b[ldb] + br2 = b[0] + } else { + br1 = b[0] + br2 = b[ldb] + } + br2 -= lr21 * br1 // Apply the Gaussian elimination step to the right-hand side. + + bbnd := math.Max(math.Abs(ur22*ur11r*br1), math.Abs(br2)) + if bbnd > 1 && math.Abs(ur22) < 1 && bbnd >= bignum*math.Abs(ur22) { + scale = 1 / bbnd + } + + // Solve the linear system ur*xr=br. + xr2 := br2 * scale / ur22 + xr1 := scale*br1*ur11r - ur11r*ur12*xr2 + if icmax&0x1 != 0 { + // If the pivot lies in the second column, swap the components of the solution. + x[0] = xr2 + x[ldx] = xr1 + } else { + x[0] = xr1 + x[ldx] = xr2 + } + xnorm = math.Max(math.Abs(xr1), math.Abs(xr2)) + + // Further scaling if norm(A)*norm(X) > overflow. + if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { + temp := cmax / bignum + x[0] *= temp + x[ldx] *= temp + xnorm *= temp + scale *= temp + } + + return scale, xnorm, ok + } + + // Complex 2×2 system (w is complex). + + // Find the largest element in C. + civ := [4]float64{ + -wi * d1, + 0, + 0, + -wi * d2, + } + var cmax float64 + var icmax int + for j, v := range crv { + v := math.Abs(v) + if v+math.Abs(civ[j]) > cmax { + cmax = v + math.Abs(civ[j]) + icmax = j + } + } + + // If norm(C) < smini, use smini*identity. + if cmax < smini { + br1 := math.Abs(b[0]) + math.Abs(b[1]) + br2 := math.Abs(b[ldb]) + math.Abs(b[ldb+1]) + bnorm := math.Max(br1, br2) + if smini < 1 && bnorm > 1 && bnorm > bignum*smini { + scale = 1 / bnorm + } + temp := scale / smini + x[0] = temp * b[0] + x[1] = temp * b[1] + x[ldb] = temp * b[ldb] + x[ldb+1] = temp * b[ldb+1] + xnorm = temp * bnorm + ok = false + + return scale, xnorm, ok + } + + // Gaussian elimination with complete pivoting. 
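+	// The pivoted system is reduced to the complex upper triangular form
+	//  [ur11+i*ui11 ur12+i*ui12]
+	//  [0           ur22+i*ui22]
+	// with real and imaginary parts carried separately.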
+ ur11 := crv[icmax] + ui11 := civ[icmax] + ur12 := crv[pivot[icmax][1]] + ui12 := civ[pivot[icmax][1]] + cr21 := crv[pivot[icmax][2]] + ci21 := civ[pivot[icmax][2]] + cr22 := crv[pivot[icmax][3]] + ci22 := civ[pivot[icmax][3]] + var ( + ur11r, ui11r float64 + lr21, li21 float64 + ur12s, ui12s float64 + ur22, ui22 float64 + ) + if icmax == 0 || icmax == 3 { + // Off-diagonals of pivoted C are real. + if math.Abs(ur11) > math.Abs(ui11) { + temp := ui11 / ur11 + ur11r = 1 / (ur11 * (1 + temp*temp)) + ui11r = -temp * ur11r + } else { + temp := ur11 / ui11 + ui11r = -1 / (ui11 * (1 + temp*temp)) + ur11r = -temp * ui11r + } + lr21 = cr21 * ur11r + li21 = cr21 * ui11r + ur12s = ur12 * ur11r + ui12s = ur12 * ui11r + ur22 = cr22 - ur12*lr21 + ui22 = ci22 - ur12*li21 + } else { + // Diagonals of pivoted C are real. + ur11r = 1 / ur11 + // ui11r is already 0. + lr21 = cr21 * ur11r + li21 = ci21 * ur11r + ur12s = ur12 * ur11r + ui12s = ui12 * ur11r + ur22 = cr22 - ur12*lr21 + ui12*li21 + ui22 = -ur12*li21 - ui12*lr21 + } + u22abs := math.Abs(ur22) + math.Abs(ui22) + + // If smaller pivot < smini, use smini. + if u22abs < smini { + ur22 = smini + ui22 = 0 + ok = false + } + + var br1, bi1 float64 + var br2, bi2 float64 + if icmax > 1 { + // If the pivot lies in the second row, swap the rows. + br1 = b[ldb] + bi1 = b[ldb+1] + br2 = b[0] + bi2 = b[1] + } else { + br1 = b[0] + bi1 = b[1] + br2 = b[ldb] + bi2 = b[ldb+1] + } + br2 += -lr21*br1 + li21*bi1 + bi2 += -li21*br1 - lr21*bi1 + + bbnd1 := u22abs * (math.Abs(ur11r) + math.Abs(ui11r)) * (math.Abs(br1) + math.Abs(bi1)) + bbnd2 := math.Abs(br2) + math.Abs(bi2) + bbnd := math.Max(bbnd1, bbnd2) + if bbnd > 1 && u22abs < 1 && bbnd >= bignum*u22abs { + scale = 1 / bbnd + br1 *= scale + bi1 *= scale + br2 *= scale + bi2 *= scale + } + + cx2 := complex(br2, bi2) / complex(ur22, ui22) + xr2, xi2 := real(cx2), imag(cx2) + xr1 := ur11r*br1 - ui11r*bi1 - ur12s*xr2 + ui12s*xi2 + xi1 := ui11r*br1 + ur11r*bi1 - ui12s*xr2 - ur12s*xi2 + if icmax&0x1 != 0 { + // If the pivot lies in the second column, swap the components of the solution. + x[0] = xr2 + x[1] = xi2 + x[ldx] = xr1 + x[ldx+1] = xi1 + } else { + x[0] = xr1 + x[1] = xi1 + x[ldx] = xr2 + x[ldx+1] = xi2 + } + xnorm = math.Max(math.Abs(xr1)+math.Abs(xi1), math.Abs(xr2)+math.Abs(xi2)) + + // Further scaling if norm(A)*norm(X) > overflow. + if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { + temp := cmax / bignum + x[0] *= temp + x[1] *= temp + x[ldx] *= temp + x[ldx+1] *= temp + xnorm *= temp + scale *= temp + } + + return scale, xnorm, ok +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go new file mode 100644 index 00000000..76bce5ec --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go @@ -0,0 +1,84 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlange computes the matrix norm of the general m×n matrix a. The input norm +// specifies the norm computed. +// lapack.MaxAbs: the maximum absolute value of an element. +// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. +// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. 
+// lapack.NormFrob: the square root of the sum of the squares of the entries. +// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. +// There are no restrictions on work for the other matrix norms. +func (impl Implementation) Dlange(norm lapack.MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 { + // TODO(btracey): These should probably be refactored to use BLAS calls. + checkMatrix(m, n, a, lda) + switch norm { + case lapack.MaxRowSum, lapack.MaxColumnSum, lapack.NormFrob, lapack.MaxAbs: + default: + panic(badNorm) + } + if norm == lapack.MaxColumnSum && len(work) < n { + panic(badWork) + } + if m == 0 && n == 0 { + return 0 + } + if norm == lapack.MaxAbs { + var value float64 + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + value = math.Max(value, math.Abs(a[i*lda+j])) + } + } + return value + } + if norm == lapack.MaxColumnSum { + if len(work) < n { + panic(badWork) + } + for i := 0; i < n; i++ { + work[i] = 0 + } + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + var value float64 + for i := 0; i < n; i++ { + value = math.Max(value, work[i]) + } + return value + } + if norm == lapack.MaxRowSum { + var value float64 + for i := 0; i < m; i++ { + var sum float64 + for j := 0; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + value = math.Max(value, sum) + } + return value + } + if norm == lapack.NormFrob { + var value float64 + scale := 0.0 + sum := 1.0 + for i := 0; i < m; i++ { + scale, sum = impl.Dlassq(n, a[i*lda:], 1, scale, sum) + } + value = scale * math.Sqrt(sum) + return value + } + panic("lapack: bad matrix norm") +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go new file mode 100644 index 00000000..74eafeb2 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go @@ -0,0 +1,68 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlanst computes the specified norm of a symmetric tridiagonal matrix A. +// The diagonal elements of A are stored in d and the off-diagonal elements +// are stored in e. 
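+//
+// For example (a minimal usage sketch, with impl an Implementation value),
+// for n = 3, d = []float64{1, 2, 3} and e = []float64{1, 1}:
+//  impl.Dlanst(lapack.MaxAbs, 3, d, e)    // 3
+//  impl.Dlanst(lapack.MaxRowSum, 3, d, e) // 1+2+1 = 4
+//  impl.Dlanst(lapack.NormFrob, 3, d, e)  // sqrt(14+2*2) = sqrt(18)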
+func (impl Implementation) Dlanst(norm lapack.MatrixNorm, n int, d, e []float64) float64 {
+	if len(d) < n {
+		panic(badD)
+	}
+	if len(e) < n-1 {
+		panic(badE)
+	}
+	if n <= 0 {
+		return 0
+	}
+	switch norm {
+	default:
+		panic(badNorm)
+	case lapack.MaxAbs:
+		anorm := math.Abs(d[n-1])
+		for i := 0; i < n-1; i++ {
+			sum := math.Abs(d[i])
+			if anorm < sum || math.IsNaN(sum) {
+				anorm = sum
+			}
+			sum = math.Abs(e[i])
+			if anorm < sum || math.IsNaN(sum) {
+				anorm = sum
+			}
+		}
+		return anorm
+	case lapack.MaxColumnSum, lapack.MaxRowSum:
+		if n == 1 {
+			return math.Abs(d[0])
+		}
+		anorm := math.Abs(d[0]) + math.Abs(e[0])
+		sum := math.Abs(e[n-2]) + math.Abs(d[n-1])
+		if anorm < sum || math.IsNaN(sum) {
+			anorm = sum
+		}
+		for i := 1; i < n-1; i++ {
+			sum := math.Abs(d[i]) + math.Abs(e[i]) + math.Abs(e[i-1])
+			if anorm < sum || math.IsNaN(sum) {
+				anorm = sum
+			}
+		}
+		return anorm
+	case lapack.NormFrob:
+		var scale float64
+		sum := 1.0
+		if n > 1 {
+			scale, sum = impl.Dlassq(n-1, e, 1, scale, sum)
+			sum = 2 * sum
+		}
+		scale, sum = impl.Dlassq(n, d, 1, scale, sum)
+		return scale * math.Sqrt(sum)
+	}
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go
new file mode 100644
index 00000000..2a380d21
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go
@@ -0,0 +1,125 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"math"
+
+	"gonum.org/v1/gonum/blas"
+	"gonum.org/v1/gonum/lapack"
+)
+
+// Dlansy computes the specified norm of an n×n symmetric matrix. If
+// norm == lapack.MaxColumnSum or norm == lapack.MaxRowSum, work must have
+// length at least n, otherwise work is unused.
+func (impl Implementation) Dlansy(norm lapack.MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64 {
+	checkMatrix(n, n, a, lda)
+	switch norm {
+	case lapack.MaxRowSum, lapack.MaxColumnSum, lapack.NormFrob, lapack.MaxAbs:
+	default:
+		panic(badNorm)
+	}
+	if (norm == lapack.MaxColumnSum || norm == lapack.MaxRowSum) && len(work) < n {
+		panic(badWork)
+	}
+	if uplo != blas.Upper && uplo != blas.Lower {
+		panic(badUplo)
+	}
+
+	if n == 0 {
+		return 0
+	}
+	switch norm {
+	default:
+		panic("unreachable")
+	case lapack.MaxAbs:
+		if uplo == blas.Upper {
+			var max float64
+			for i := 0; i < n; i++ {
+				for j := i; j < n; j++ {
+					v := math.Abs(a[i*lda+j])
+					if math.IsNaN(v) {
+						return math.NaN()
+					}
+					if v > max {
+						max = v
+					}
+				}
+			}
+			return max
+		}
+		var max float64
+		for i := 0; i < n; i++ {
+			for j := 0; j <= i; j++ {
+				v := math.Abs(a[i*lda+j])
+				if math.IsNaN(v) {
+					return math.NaN()
+				}
+				if v > max {
+					max = v
+				}
+			}
+		}
+		return max
+	case lapack.MaxRowSum, lapack.MaxColumnSum:
+		// A symmetric matrix has the same 1-norm and ∞-norm.
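+		// Only the stored triangle is traversed: each off-diagonal
+		// element a[i,j] contributes to the sums of both row i and
+		// row j accumulated in work.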
+ for i := 0; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < n; i++ { + work[i] += math.Abs(a[i*lda+i]) + for j := i + 1; j < n; j++ { + v := math.Abs(a[i*lda+j]) + work[i] += v + work[j] += v + } + } + } else { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + v := math.Abs(a[i*lda+j]) + work[i] += v + work[j] += v + } + work[i] += math.Abs(a[i*lda+i]) + } + } + var max float64 + for i := 0; i < n; i++ { + v := work[i] + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + return max + case lapack.NormFrob: + if uplo == blas.Upper { + var sum float64 + for i := 0; i < n; i++ { + v := a[i*lda+i] + sum += v * v + for j := i + 1; j < n; j++ { + v := a[i*lda+j] + sum += 2 * v * v + } + } + return math.Sqrt(sum) + } + var sum float64 + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + v := a[i*lda+j] + sum += 2 * v * v + } + v := a[i*lda+i] + sum += v * v + } + return math.Sqrt(sum) + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go new file mode 100644 index 00000000..25702cff --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go @@ -0,0 +1,252 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dlantr computes the specified norm of an m×n trapezoidal matrix A. If +// norm == lapack.MaxColumnSum work must have length at least n, otherwise work +// is unused. +func (impl Implementation) Dlantr(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64 { + checkMatrix(m, n, a, lda) + switch norm { + case lapack.MaxRowSum, lapack.MaxColumnSum, lapack.NormFrob, lapack.MaxAbs: + default: + panic(badNorm) + } + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + if norm == lapack.MaxColumnSum && len(work) < n { + panic(badWork) + } + if min(m, n) == 0 { + return 0 + } + switch norm { + default: + panic("unreachable") + case lapack.MaxAbs: + if diag == blas.Unit { + value := 1.0 + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + for i := 1; i < m; i++ { + for j := 0; j < min(i, n); j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + var value float64 + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + case lapack.MaxColumnSum: + if diag == blas.Unit { + for i := 0; i < min(m, n); i++ { + work[i] = 1 + } + for i := min(m, n); i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } else { + for i := 1; i < 
m; i++ { + for j := 0; j < min(i, n); j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } + } else { + for i := 0; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } + } + var max float64 + for _, v := range work[:n] { + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + return max + case lapack.MaxRowSum: + var maxsum float64 + if diag == blas.Unit { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + var sum float64 + if i < min(m, n) { + sum = 1 + } + for j := i + 1; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return math.NaN() + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } else { + for i := 1; i < m; i++ { + var sum float64 + if i < min(m, n) { + sum = 1 + } + for j := 0; j < min(i, n); j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return math.NaN() + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } + } else { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + var sum float64 + for j := i; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return sum + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } else { + for i := 0; i < m; i++ { + var sum float64 + for j := 0; j <= min(i, n-1); j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return sum + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } + } + case lapack.NormFrob: + var nrm float64 + if diag == blas.Unit { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + tmp := a[i*lda+j] + nrm += tmp * tmp + } + } + } else { + for i := 1; i < m; i++ { + for j := 0; j < min(i, n); j++ { + tmp := a[i*lda+j] + nrm += tmp * tmp + } + } + } + nrm += float64(min(m, n)) + } else { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + nrm += tmp * tmp + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + tmp := math.Abs(a[i*lda+j]) + nrm += tmp * tmp + } + } + } + } + return math.Sqrt(nrm) + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go new file mode 100644 index 00000000..e5dcfb75 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go @@ -0,0 +1,132 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlanv2 computes the Schur factorization of a real 2×2 matrix: +// [ a b ] = [ cs -sn ] * [ aa bb ] * [ cs sn ] +// [ c d ] [ sn cs ] [ cc dd ] * [-sn cs ] +// If cc is zero, aa and dd are real eigenvalues of the matrix. Otherwise it +// holds that aa = dd and bb*cc < 0, and aa ± sqrt(bb*cc) are complex conjugate +// eigenvalues. The real and imaginary parts of the eigenvalues are returned in +// (rt1r,rt1i) and (rt2r,rt2i). +func (impl Implementation) Dlanv2(a, b, c, d float64) (aa, bb, cc, dd float64, rt1r, rt1i, rt2r, rt2i float64, cs, sn float64) { + switch { + case c == 0: // Matrix is already upper triangular. 
+		aa = a
+		bb = b
+		cc = 0
+		dd = d
+		cs = 1
+		sn = 0
+	case b == 0: // Matrix is lower triangular, swap rows and columns.
+		aa = d
+		bb = -c
+		cc = 0
+		dd = a
+		cs = 0
+		sn = 1
+	case a == d && math.Signbit(b) != math.Signbit(c): // Matrix is already in the standard Schur form.
+		aa = a
+		bb = b
+		cc = c
+		dd = d
+		cs = 1
+		sn = 0
+	default:
+		temp := a - d
+		p := temp / 2
+		bcmax := math.Max(math.Abs(b), math.Abs(c))
+		bcmis := math.Min(math.Abs(b), math.Abs(c))
+		if b*c < 0 {
+			bcmis *= -1
+		}
+		scale := math.Max(math.Abs(p), bcmax)
+		z := p/scale*p + bcmax/scale*bcmis
+		eps := dlamchP
+
+		if z >= 4*eps {
+			// Real eigenvalues. Compute aa and dd.
+			if p > 0 {
+				z = p + math.Sqrt(scale)*math.Sqrt(z)
+			} else {
+				z = p - math.Sqrt(scale)*math.Sqrt(z)
+			}
+			aa = d + z
+			dd = d - bcmax/z*bcmis
+			// Compute bb and the rotation matrix.
+			tau := impl.Dlapy2(c, z)
+			cs = z / tau
+			sn = c / tau
+			bb = b - c
+			cc = 0
+		} else {
+			// Complex eigenvalues, or real (almost) equal eigenvalues.
+			// Make diagonal elements equal.
+			sigma := b + c
+			tau := impl.Dlapy2(sigma, temp)
+			cs = math.Sqrt((1 + math.Abs(sigma)/tau) / 2)
+			sn = -p / (tau * cs)
+			if sigma < 0 {
+				sn *= -1
+			}
+			// Compute [ aa bb ] = [ a b ] [ cs -sn ]
+			//         [ cc dd ]   [ c d ] [ sn  cs ]
+			aa = a*cs + b*sn
+			bb = -a*sn + b*cs
+			cc = c*cs + d*sn
+			dd = -c*sn + d*cs
+			// Compute [ a b ] = [ cs sn ] [ aa bb ]
+			//         [ c d ]   [-sn cs ] [ cc dd ]
+			a = aa*cs + cc*sn
+			b = bb*cs + dd*sn
+			c = -aa*sn + cc*cs
+			d = -bb*sn + dd*cs
+
+			temp = (a + d) / 2
+			aa = temp
+			bb = b
+			cc = c
+			dd = temp
+
+			if cc != 0 {
+				if bb != 0 {
+					if math.Signbit(bb) == math.Signbit(cc) {
+						// Real eigenvalues, reduce to
+						// upper triangular form.
+						sab := math.Sqrt(math.Abs(bb))
+						sac := math.Sqrt(math.Abs(cc))
+						p = sab * sac
+						if cc < 0 {
+							p *= -1
+						}
+						tau = 1 / math.Sqrt(math.Abs(bb+cc))
+						aa = temp + p
+						bb = bb - cc
+						cc = 0
+						dd = temp - p
+						cs1 := sab * tau
+						sn1 := sac * tau
+						cs, sn = cs*cs1-sn*sn1, cs*sn1+sn*cs1
+					}
+				} else {
+					bb = -cc
+					cc = 0
+					cs, sn = -sn, cs
+				}
+			}
+		}
+	}
+
+	// Store eigenvalues in (rt1r,rt1i) and (rt2r,rt2i).
+	rt1r = aa
+	rt2r = dd
+	if cc != 0 {
+		rt1i = math.Sqrt(math.Abs(bb)) * math.Sqrt(math.Abs(cc))
+		rt2i = -rt1i
+	}
+	return
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go
new file mode 100644
index 00000000..cb5c0b7e
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go
@@ -0,0 +1,36 @@
+// Copyright ©2017 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import "gonum.org/v1/gonum/blas/blas64"
+
+// Dlapll returns the smallest singular value of the n×2 matrix A = [ x y ].
+// The function first computes the QR factorization of A = Q*R, and then computes
+// the SVD of the 2-by-2 upper triangular matrix r.
+//
+// The contents of x and y are overwritten during the call.
+//
+// Dlapll is an internal routine. It is exported for testing purposes.
+func (impl Implementation) Dlapll(n int, x []float64, incX int, y []float64, incY int) float64 {
+	checkVector(n, x, incX)
+	checkVector(n, y, incY)
+
+	if n <= 1 {
+		return 0
+	}
+
+	// Compute the QR factorization of the N-by-2 matrix [ X Y ].
+ a00, tau := impl.Dlarfg(n, x[0], x[incX:], incX) + x[0] = 1 + + bi := blas64.Implementation() + c := -tau * bi.Ddot(n, x, incX, y, incY) + bi.Daxpy(n, c, x, incX, y, incY) + a11, _ := impl.Dlarfg(n-1, y[incY], y[2*incY:], incY) + + // Compute the SVD of 2-by-2 upper triangular matrix. + ssmin, _ := impl.Dlas2(a00, y[0], a11) + return ssmin +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go new file mode 100644 index 00000000..f5a5b1fd --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go @@ -0,0 +1,72 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas/blas64" + +// Dlapmt rearranges the columns of the m×n matrix X as specified by the +// permutation k_0, k_1, ..., k_n-1 of the integers 0, ..., n-1. +// +// If forward is true a forward permutation is performed: +// +// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. +// +// otherwise a backward permutation is performed: +// +// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. +// +// k must have length n, otherwise Dlapmt will panic. k is zero-indexed. +func (impl Implementation) Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) { + checkMatrix(m, n, x, ldx) + if len(k) != n { + panic(badKperm) + } + + if n <= 1 { + return + } + + for i, v := range k { + v++ + k[i] = -v + } + + bi := blas64.Implementation() + + if forward { + for j, v := range k { + if v >= 0 { + continue + } + k[j] = -v + i := -v - 1 + for k[i] < 0 { + bi.Dswap(m, x[j:], ldx, x[i:], ldx) + + k[i] = -k[i] + j = i + i = k[i] - 1 + } + } + } else { + for i, v := range k { + if v >= 0 { + continue + } + k[i] = -v + j := -v - 1 + for j != i { + bi.Dswap(m, x[j:], ldx, x[i:], ldx) + + k[j] = -k[j] + j = k[j] - 1 + } + } + } + + for i := range k { + k[i]-- + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go new file mode 100644 index 00000000..19f73ffa --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go @@ -0,0 +1,14 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlapy2 is the LAPACK version of math.Hypot. +// +// Dlapy2 is an internal routine. It is exported for testing purposes. +func (Implementation) Dlapy2(x, y float64) float64 { + return math.Hypot(x, y) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go new file mode 100644 index 00000000..80f43905 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go @@ -0,0 +1,111 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqp2 computes a QR factorization with column pivoting of the block A[offset:m, 0:n] +// of the m×n matrix A. 
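At each step the remaining +// column with the largest partial norm is swapped to the front and +// eliminated with a Householder reflection, so that A*P = Q*R.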
The block A[0:offset, 0:n] is accordingly pivoted, but not factorized. +// +// On exit, the upper triangle of block A[offset:m, 0:n] is the triangular factor obtained. +// The elements in block A[offset:m, 0:n] below the diagonal, together with tau, represent +// the orthogonal matrix Q as a product of elementary reflectors. +// +// offset is number of rows of the matrix A that must be pivoted but not factorized. +// offset must not be negative otherwise Dlaqp2 will panic. +// +// On exit, jpvt holds the permutation that was applied; the jth column of A*P was the +// jpvt[j] column of A. jpvt must have length n, otherwise Dlaqp2 will panic. +// +// On exit tau holds the scalar factors of the elementary reflectors. It must have length +// at least min(m-offset, n) otherwise Dlaqp2 will panic. +// +// vn1 and vn2 hold the partial and complete column norms respectively. They must have length n, +// otherwise Dlaqp2 will panic. +// +// work must have length n, otherwise Dlaqp2 will panic. +// +// Dlaqp2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqp2(m, n, offset int, a []float64, lda int, jpvt []int, tau, vn1, vn2, work []float64) { + checkMatrix(m, n, a, lda) + if len(jpvt) != n { + panic(badIpiv) + } + mn := min(m-offset, n) + if len(tau) < mn { + panic(badTau) + } + if len(vn1) < n { + panic(badVn1) + } + if len(vn2) < n { + panic(badVn2) + } + if len(work) < n { + panic(badWork) + } + + tol3z := math.Sqrt(dlamchE) + + bi := blas64.Implementation() + + // Compute factorization. + for i := 0; i < mn; i++ { + offpi := offset + i + + // Determine ith pivot column and swap if necessary. + p := i + bi.Idamax(n-i, vn1[i:], 1) + if p != i { + bi.Dswap(m, a[p:], lda, a[i:], lda) + jpvt[p], jpvt[i] = jpvt[i], jpvt[p] + vn1[p] = vn1[i] + vn2[p] = vn2[i] + } + + // Generate elementary reflector H_i. + if offpi < m-1 { + a[offpi*lda+i], tau[i] = impl.Dlarfg(m-offpi, a[offpi*lda+i], a[(offpi+1)*lda+i:], lda) + } else { + tau[i] = 0 + } + + if i < n-1 { + // Apply H_i^T to A[offset+i:m, i:n] from the left. + aii := a[offpi*lda+i] + a[offpi*lda+i] = 1 + impl.Dlarf(blas.Left, m-offpi, n-i-1, a[offpi*lda+i:], lda, tau[i], a[offpi*lda+i+1:], lda, work) + a[offpi*lda+i] = aii + } + + // Update partial column norms. + for j := i + 1; j < n; j++ { + if vn1[j] == 0 { + continue + } + + // The following marked lines follow from the + // analysis in Lapack Working Note 176. + r := math.Abs(a[offpi*lda+j]) / vn1[j] // * + temp := math.Max(0, 1-r*r) // * + r = vn1[j] / vn2[j] // * + temp2 := temp * r * r // * + if temp2 < tol3z { + var v float64 + if offpi < m-1 { + v = bi.Dnrm2(m-offpi-1, a[(offpi+1)*lda+j:], lda) + } + vn1[j] = v + vn2[j] = v + } else { + vn1[j] *= math.Sqrt(temp) // * + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go new file mode 100644 index 00000000..89cfd094 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go @@ -0,0 +1,217 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqps computes a step of QR factorization with column pivoting +// of an m×n matrix A by using Blas-3. 
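(Blas-3 means that the bulk of +// the update is performed by matrix-matrix operations such as Dgemm, rather +// than by the vector operations used by the unblocked Dlaqp2.)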
It tries to factorize nb +// columns from A starting from the row offset, and updates all +// of the matrix with Dgemm. +// +// In some cases, due to catastrophic cancellations, it cannot +// factorize nb columns. Hence, the actual number of factorized +// columns is returned in kb. +// +// Dlaqps computes a QR factorization with column pivoting of the +// block A[offset:m, 0:nb] of the m×n matrix A. The block +// A[0:offset, 0:n] is accordingly pivoted, but not factorized. +// +// On exit, the upper triangle of block A[offset:m, 0:kb] is the +// triangular factor obtained. The elements in block A[offset:m, 0:n] +// below the diagonal, together with tau, represent the orthogonal +// matrix Q as a product of elementary reflectors. +// +// offset is the number of rows of the matrix A that must be pivoted but +// not factorized. offset must not be negative otherwise Dlaqps will panic. +// +// On exit, jpvt holds the permutation that was applied; the jth column +// of A*P was the jpvt[j] column of A. jpvt must have length n, +// otherwise Dlaqps will panic. +// +// On exit tau holds the scalar factors of the elementary reflectors. +// It must have length nb, otherwise Dlaqps will panic. +// +// vn1 and vn2 hold the partial and complete column norms respectively. +// They must have length n, otherwise Dlaqps will panic. +// +// auxv must have length nb, otherwise Dlaqps will panic. +// +// f and ldf represent an n×nb matrix F that is overwritten during the +// call. +// +// Dlaqps is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqps(m, n, offset, nb int, a []float64, lda int, jpvt []int, tau, vn1, vn2, auxv, f []float64, ldf int) (kb int) { + checkMatrix(m, n, a, lda) + checkMatrix(n, nb, f, ldf) + if offset > m { + panic(offsetGTM) + } + if n < 0 || nb > n { + panic(badNb) + } + if len(jpvt) != n { + panic(badIpiv) + } + if len(tau) < nb { + panic(badTau) + } + if len(vn1) < n { + panic(badVn1) + } + if len(vn2) < n { + panic(badVn2) + } + if len(auxv) < nb { + panic(badAuxv) + } + + lastrk := min(m, n+offset) + lsticc := -1 + tol3z := math.Sqrt(dlamchE) + + bi := blas64.Implementation() + + var k, rk int + for ; k < nb && lsticc == -1; k++ { + rk = offset + k + + // Determine kth pivot column and swap if necessary. + p := k + bi.Idamax(n-k, vn1[k:], 1) + if p != k { + bi.Dswap(m, a[p:], lda, a[k:], lda) + bi.Dswap(k, f[p*ldf:], 1, f[k*ldf:], 1) + jpvt[p], jpvt[k] = jpvt[k], jpvt[p] + vn1[p] = vn1[k] + vn2[p] = vn2[k] + } + + // Apply previous Householder reflectors to column K: + // + // A[rk:m, k] = A[rk:m, k] - A[rk:m, 0:k-1]*F[k, 0:k-1]^T. + if k > 0 { + bi.Dgemv(blas.NoTrans, m-rk, k, -1, + a[rk*lda:], lda, + f[k*ldf:], 1, + 1, + a[rk*lda+k:], lda) + } + + // Generate elementary reflector H_k. + if rk < m-1 { + a[rk*lda+k], tau[k] = impl.Dlarfg(m-rk, a[rk*lda+k], a[(rk+1)*lda+k:], lda) + } else { + tau[k] = 0 + } + + akk := a[rk*lda+k] + a[rk*lda+k] = 1 + + // Compute kth column of F: + // + // Compute F[k+1:n, k] = tau[k]*A[rk:m, k+1:n]^T*A[rk:m, k]. + if k < n-1 { + bi.Dgemv(blas.Trans, m-rk, n-k-1, tau[k], + a[rk*lda+k+1:], lda, + a[rk*lda+k:], lda, + 0, + f[(k+1)*ldf+k:], ldf) + } + + // Padding F[0:k, k] with zeros. + for j := 0; j < k; j++ { + f[j*ldf+k] = 0 + } + + // Incremental updating of F: + // + // F[0:n, k] := F[0:n, k] - tau[k]*F[0:n, 0:k-1]*A[rk:m, 0:k-1]^T*A[rk:m,k].
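+ // + // This is done in two Dgemv calls: first + // auxv = -tau[k] * A[rk:m, 0:k]^T * A[rk:m, k] + // and then + // F[0:n, k] += F[0:n, 0:k] * auxv.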
+ if k > 0 { + bi.Dgemv(blas.Trans, m-rk, k, -tau[k], + a[rk*lda:], lda, + a[rk*lda+k:], lda, + 0, + auxv, 1) + bi.Dgemv(blas.NoTrans, n, k, 1, + f, ldf, + auxv, 1, + 1, + f[k:], ldf) + } + + // Update the current row of A: + // + // A[rk, k+1:n] = A[rk, k+1:n] - A[rk, 0:k]*F[k+1:n, 0:k]^T. + if k < n-1 { + bi.Dgemv(blas.NoTrans, n-k-1, k+1, -1, + f[(k+1)*ldf:], ldf, + a[rk*lda:], 1, + 1, + a[rk*lda+k+1:], 1) + } + + // Update partial column norms. + if rk < lastrk-1 { + for j := k + 1; j < n; j++ { + if vn1[j] == 0 { + continue + } + + // The following marked lines follow from the + // analysis in Lapack Working Note 176. + r := math.Abs(a[rk*lda+j]) / vn1[j] // * + temp := math.Max(0, 1-r*r) // * + r = vn1[j] / vn2[j] // * + temp2 := temp * r * r // * + if temp2 < tol3z { + // vn2 is used here as a collection of + // indices into vn2 and also a collection + // of column norms. + vn2[j] = float64(lsticc) + lsticc = j + } else { + vn1[j] *= math.Sqrt(temp) // * + } + } + } + + a[rk*lda+k] = akk + } + kb = k + rk = offset + kb + + // Apply the block reflector to the rest of the matrix: + // + // A[offset+kb+1:m, kb+1:n] := A[offset+kb+1:m, kb+1:n] - A[offset+kb+1:m, 1:kb]*F[kb+1:n, 1:kb]^T. + if kb < min(n, m-offset) { + bi.Dgemm(blas.NoTrans, blas.Trans, + m-rk, n-kb, kb, -1, + a[rk*lda:], lda, + f[kb*ldf:], ldf, + 1, + a[rk*lda+kb:], lda) + } + + // Recomputation of difficult columns. + for lsticc >= 0 { + itemp := int(vn2[lsticc]) + + // NOTE: The computation of vn1[lsticc] relies on the fact that + // Dnrm2 does not fail on vectors with norm below the value of + // sqrt(dlamchS) + v := bi.Dnrm2(m-rk, a[rk*lda+lsticc:], lda) + vn1[lsticc] = v + vn2[lsticc] = v + + lsticc = itemp + } + + return kb +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go new file mode 100644 index 00000000..945c657d --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go @@ -0,0 +1,475 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" +) + +// Dlaqr04 computes the eigenvalues of a block of an n×n upper Hessenberg matrix +// H, and optionally the matrices T and Z from the Schur decomposition +// H = Z T Z^T +// where T is an upper quasi-triangular matrix (the Schur form), and Z is the +// orthogonal matrix of Schur vectors. +// +// wantt indicates whether the full Schur form T is required. If wantt is false, +// then only enough of H will be updated to preserve the eigenvalues. +// +// wantz indicates whether the n×n matrix of Schur vectors Z is required. If it +// is true, the orthogonal similarity transformation will be accumulated into +// Z[iloz:ihiz+1,ilo:ihi+1], otherwise Z will not be referenced. +// +// ilo and ihi determine the block of H on which Dlaqr04 operates. It must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0, +// and the block must be isolated, that is, +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// otherwise Dlaqr04 will panic. +// +// wr and wi must have length ihi+1. +// +// iloz and ihiz specify the rows of Z to which transformations will be applied +// if wantz is true. It must hold that +// 0 <= iloz <= ilo, and ihi <= ihiz < n, +// otherwise Dlaqr04 will panic. 
+// +// work must have length at least lwork and lwork must be +// lwork >= 1, if n <= 11, +// lwork >= n, if n > 11, +// otherwise Dlaqr04 will panic. lwork as large as 6*n may be required for +// optimal performance. On return, work[0] will contain the optimal value of +// lwork. +// +// If lwork is -1, instead of performing Dlaqr04, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// recur is the non-negative recursion depth. For recur > 0, Dlaqr04 behaves +// as DLAQR0, for recur == 0 it behaves as DLAQR4. +// +// unconverged indicates whether Dlaqr04 computed all the eigenvalues of H[ilo:ihi+1,ilo:ihi+1]. +// +// If unconverged is zero and wantt is true, H will contain on return the upper +// quasi-triangular matrix T from the Schur decomposition. 2×2 diagonal blocks +// (corresponding to complex conjugate pairs of eigenvalues) will be returned in +// standard form, with H[i,i] == H[i+1,i+1] and H[i+1,i]*H[i,i+1] < 0. +// +// If unconverged is zero and if wantt is false, the contents of h on return is +// unspecified. +// +// If unconverged is zero, all the eigenvalues have been computed and their real +// and imaginary parts will be stored on return in wr[ilo:ihi+1] and +// wi[ilo:ihi+1], respectively. If two eigenvalues are computed as a complex +// conjugate pair, they are stored in consecutive elements of wr and wi, say the +// i-th and (i+1)th, with wi[i] > 0 and wi[i+1] < 0. If wantt is true, then the +// eigenvalues are stored in the same order as on the diagonal of the Schur form +// returned in H, with wr[i] = H[i,i] and, if H[i:i+2,i:i+2] is a 2×2 diagonal +// block, wi[i] = sqrt(-H[i+1,i]*H[i,i+1]) and wi[i+1] = -wi[i]. +// +// If unconverged is positive, some eigenvalues have not converged, and +// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] will contain those +// eigenvalues which have been successfully computed. Failures are rare. +// +// If unconverged is positive and wantt is true, then on return +// (initial H)*U = U*(final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged is positive and wantt is false, on return the remaining +// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix +// H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged is positive and wantz is true, then on return +// (final Z) = (initial Z)*U, +// where U is the orthogonal matrix in (*) regardless of the value of wantt. +// +// References: +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// Dlaqr04 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqr04(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int, work []float64, lwork int, recur int) (unconverged int) { + const ( + // Matrices of order ntiny or smaller must be processed by + // Dlahqr because of insufficient subdiagonal scratch space. + // This is a hard limit. 
+ ntiny = 11 + // Exceptional deflation windows: try to cure rare slow + // convergence by varying the size of the deflation window after + // kexnw iterations. + kexnw = 5 + // Exceptional shifts: try to cure rare slow convergence with + // ad-hoc exceptional shifts every kexsh iterations. + kexsh = 6 + + // See https://github.com/gonum/lapack/pull/151#discussion_r68162802 + // and the surrounding discussion for an explanation where these + // constants come from. + // TODO(vladimir-ch): Similar constants for exceptional shifts + // are used also in dlahqr.go. The first constant is different + // there, it is equal to 3. Why? And does it matter? + wilk1 = 0.75 + wilk2 = -0.4375 + ) + + switch { + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lwork < 1 && n <= ntiny && lwork != -1: + panic(badWork) + // TODO(vladimir-ch): Enable if and when we figure out what the minimum + // necessary lwork value is. Dlaqr04 says that the minimum is n which + // clashes with Dlaqr23's opinion about optimal work when nw <= 2 + // (independent of n). + // case lwork < n && n > ntiny && lwork != -1: + // panic(badWork) + case len(work) < lwork: + panic(shortWork) + case recur < 0: + panic("lapack: recur is negative") + } + if wantz { + if iloz < 0 || ilo < iloz { + panic("lapack: invalid value of iloz") + } + if ihiz < ihi || n <= ihiz { + panic("lapack: invalid value of ihiz") + } + } + if lwork != -1 { + checkMatrix(n, n, h, ldh) + if wantz { + checkMatrix(n, n, z, ldz) + } + switch { + case ilo > 0 && h[ilo*ldh+ilo-1] != 0: + panic("lapack: block not isolated") + case ihi+1 < n && h[(ihi+1)*ldh+ihi] != 0: + panic("lapack: block not isolated") + case len(wr) != ihi+1: + panic("lapack: bad length of wr") + case len(wi) != ihi+1: + panic("lapack: bad length of wi") + } + } + + // Quick return. + if n == 0 { + work[0] = 1 + return 0 + } + + if n <= ntiny { + // Tiny matrices must use Dlahqr. + work[0] = 1 + if lwork == -1 { + return 0 + } + return impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz) + } + + // Use small bulge multi-shift QR with aggressive early deflation on + // larger-than-tiny matrices. + var jbcmpz string + if wantt { + jbcmpz = "S" + } else { + jbcmpz = "E" + } + if wantz { + jbcmpz += "V" + } else { + jbcmpz += "N" + } + + var fname string + if recur > 0 { + fname = "DLAQR0" + } else { + fname = "DLAQR4" + } + // nwr is the recommended deflation window size. n is greater than 11, + // so there is enough subdiagonal workspace for nwr >= 2 as required. + // (In fact, there is enough subdiagonal space for nwr >= 3.) + // TODO(vladimir-ch): If there is enough space for nwr >= 3, should we + // use it? + nwr := impl.Ilaenv(13, fname, jbcmpz, n, ilo, ihi, lwork) + nwr = max(2, nwr) + nwr = min(ihi-ilo+1, min((n-1)/3, nwr)) + + // nsr is the recommended number of simultaneous shifts. n is greater + // than 11, so there is enough subdiagonal workspace for nsr to be even + // and greater than or equal to two as required. + nsr := impl.Ilaenv(15, fname, jbcmpz, n, ilo, ihi, lwork) + nsr = min(nsr, min((n+6)/9, ihi-ilo)) + nsr = max(2, nsr&^1) + + // Workspace query call to Dlaqr23. + impl.Dlaqr23(wantt, wantz, n, ilo, ihi, nwr+1, nil, 0, iloz, ihiz, nil, 0, + nil, nil, nil, 0, n, nil, 0, n, nil, 0, work, -1, recur) + // Optimal workspace is max(Dlaqr5, Dlaqr23). + lwkopt := max(3*nsr/2, int(work[0])) + // Quick return in case of workspace query. 
+ if lwork == -1 { + work[0] = float64(lwkopt) + return 0 + } + + // Dlahqr/Dlaqr04 crossover point. + nmin := impl.Ilaenv(12, fname, jbcmpz, n, ilo, ihi, lwork) + nmin = max(ntiny, nmin) + + // Nibble determines when to skip a multi-shift QR sweep (Dlaqr5). + nibble := impl.Ilaenv(14, fname, jbcmpz, n, ilo, ihi, lwork) + nibble = max(0, nibble) + + // Computation mode of far-from-diagonal orthogonal updates in Dlaqr5. + kacc22 := impl.Ilaenv(16, fname, jbcmpz, n, ilo, ihi, lwork) + kacc22 = max(0, min(kacc22, 2)) + + // nwmax is the largest possible deflation window for which there is + // sufficient workspace. + nwmax := min((n-1)/3, lwork/2) + nw := nwmax // Start with maximum deflation window size. + + // nsmax is the largest number of simultaneous shifts for which there is + // sufficient workspace. + nsmax := min((n+6)/9, 2*lwork/3) &^ 1 + + ndfl := 1 // Number of iterations since last deflation. + ndec := 0 // Deflation window size decrement. + + // Main loop. + var ( + itmax = max(30, 2*kexsh) * max(10, (ihi-ilo+1)) + it = 0 + ) + for kbot := ihi; kbot >= ilo; { + if it == itmax { + unconverged = kbot + 1 + break + } + it++ + + // Locate active block. + ktop := ilo + for k := kbot; k >= ilo+1; k-- { + if h[k*ldh+k-1] == 0 { + ktop = k + break + } + } + + // Select deflation window size nw. + // + // Typical Case: + // If possible and advisable, nibble the entire active block. + // If not, use size min(nwr,nwmax) or min(nwr+1,nwmax) + // depending upon which has the smaller corresponding + // subdiagonal entry (a heuristic). + // + // Exceptional Case: + // If there have been no deflations in kexnw or more + // iterations, then vary the deflation window size. At first, + // because larger windows are, in general, more powerful than + // smaller ones, rapidly increase the window to the maximum + // possible. Then, gradually reduce the window size. + nh := kbot - ktop + 1 + nwupbd := min(nh, nwmax) + if ndfl < kexnw { + nw = min(nwupbd, nwr) + } else { + nw = min(nwupbd, 2*nw) + } + if nw < nwmax { + if nw >= nh-1 { + nw = nh + } else { + kwtop := kbot - nw + 1 + if math.Abs(h[kwtop*ldh+kwtop-1]) > math.Abs(h[(kwtop-1)*ldh+kwtop-2]) { + nw++ + } + } + } + if ndfl < kexnw { + ndec = -1 + } else if ndec >= 0 || nw >= nwupbd { + ndec++ + if nw-ndec < 2 { + ndec = 0 + } + nw -= ndec + } + + // Split workspace under the subdiagonal of H into: + // - an nw×nw work array V in the lower left-hand corner, + // - an nw×nhv horizontal work array along the bottom edge (nhv + // must be at least nw but more is better), + // - an nve×nw vertical work array along the left-hand-edge + // (nhv can be any positive integer but more is better). + kv := n - nw + kt := nw + kwv := nw + 1 + nhv := n - kwv - kt + // Aggressive early deflation. + ls, ld := impl.Dlaqr23(wantt, wantz, n, ktop, kbot, nw, + h, ldh, iloz, ihiz, z, ldz, wr[:kbot+1], wi[:kbot+1], + h[kv*ldh:], ldh, nhv, h[kv*ldh+kt:], ldh, nhv, h[kwv*ldh:], ldh, work, lwork, recur) + + // Adjust kbot accounting for new deflations. + kbot -= ld + // ks points to the shifts. + ks := kbot - ls + 1 + + // Skip an expensive QR sweep if there is a (partly heuristic) + // reason to expect that many eigenvalues will deflate without + // it. Here, the QR sweep is skipped if many eigenvalues have + // just been deflated or if the remaining active block is small. + if ld > 0 && (100*ld > nw*nibble || kbot-ktop+1 <= min(nmin, nwmax)) { + // ld is positive, note progress. + ndfl = 1 + continue + } + + // ns is the nominal number of simultaneous shifts. 
This may be + // lowered (slightly) if Dlaqr23 did not provide that many + // shifts. + ns := min(min(nsmax, nsr), max(2, kbot-ktop)) &^ 1 + + // If there have been no deflations in a multiple of kexsh + // iterations, then try exceptional shifts. Otherwise use shifts + // provided by Dlaqr23 above or from the eigenvalues of a + // trailing principal submatrix. + if ndfl%kexsh == 0 { + ks = kbot - ns + 1 + for i := kbot; i > max(ks, ktop+1); i -= 2 { + ss := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) + aa := wilk1*ss + h[i*ldh+i] + _, _, _, _, wr[i-1], wi[i-1], wr[i], wi[i], _, _ = + impl.Dlanv2(aa, ss, wilk2*ss, aa) + } + if ks == ktop { + wr[ks+1] = h[(ks+1)*ldh+ks+1] + wi[ks+1] = 0 + wr[ks] = wr[ks+1] + wi[ks] = wi[ks+1] + } + } else { + // If we got ns/2 or fewer shifts, use Dlahqr or recur + // into Dlaqr04 on a trailing principal submatrix to get + // more. Since ns <= nsmax <=(n+6)/9, there is enough + // space below the subdiagonal to fit an ns×ns scratch + // array. + if kbot-ks+1 <= ns/2 { + ks = kbot - ns + 1 + kt = n - ns + impl.Dlacpy(blas.All, ns, ns, h[ks*ldh+ks:], ldh, h[kt*ldh:], ldh) + if ns > nmin && recur > 0 { + ks += impl.Dlaqr04(false, false, ns, 1, ns-1, h[kt*ldh:], ldh, + wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 0, work, lwork, recur-1) + } else { + ks += impl.Dlahqr(false, false, ns, 0, ns-1, h[kt*ldh:], ldh, + wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 0) + } + // In case of a rare QR failure use eigenvalues + // of the trailing 2×2 principal submatrix. + if ks >= kbot { + aa := h[(kbot-1)*ldh+kbot-1] + bb := h[(kbot-1)*ldh+kbot] + cc := h[kbot*ldh+kbot-1] + dd := h[kbot*ldh+kbot] + _, _, _, _, wr[kbot-1], wi[kbot-1], wr[kbot], wi[kbot], _, _ = + impl.Dlanv2(aa, bb, cc, dd) + ks = kbot - 1 + } + } + + if kbot-ks+1 > ns { + // Sorting the shifts helps a little. Bubble + // sort keeps complex conjugate pairs together. + sorted := false + for k := kbot; k > ks; k-- { + if sorted { + break + } + sorted = true + for i := ks; i < k; i++ { + if math.Abs(wr[i])+math.Abs(wi[i]) >= math.Abs(wr[i+1])+math.Abs(wi[i+1]) { + continue + } + sorted = false + wr[i], wr[i+1] = wr[i+1], wr[i] + wi[i], wi[i+1] = wi[i+1], wi[i] + } + } + } + + // Shuffle shifts into pairs of real shifts and pairs of + // complex conjugate shifts using the fact that complex + // conjugate shifts are already adjacent to one another. + // TODO(vladimir-ch): The shuffling here could probably + // be removed but I'm not sure right now and it's safer + // to leave it. + for i := kbot; i > ks+1; i -= 2 { + if wi[i] == -wi[i-1] { + continue + } + wr[i], wr[i-1], wr[i-2] = wr[i-1], wr[i-2], wr[i] + wi[i], wi[i-1], wi[i-2] = wi[i-1], wi[i-2], wi[i] + } + } + + // If there are only two shifts and both are real, then use only one. + if kbot-ks+1 == 2 && wi[kbot] == 0 { + if math.Abs(wr[kbot]-h[kbot*ldh+kbot]) < math.Abs(wr[kbot-1]-h[kbot*ldh+kbot]) { + wr[kbot-1] = wr[kbot] + } else { + wr[kbot] = wr[kbot-1] + } + } + + // Use up to ns of the the smallest magnitude shifts. If there + // aren't ns shifts available, then use them all, possibly + // dropping one to make the number of shifts even. + ns = min(ns, kbot-ks+1) &^ 1 + ks = kbot - ns + 1 + + // Split workspace under the subdiagonal into: + // - a kdu×kdu work array U in the lower left-hand-corner, + // - a kdu×nhv horizontal work array WH along the bottom edge + // (nhv must be at least kdu but more is better), + // - an nhv×kdu vertical work array WV along the left-hand-edge + // (nhv must be at least kdu but more is better). 
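+ // kdu is the order of the orthogonal accumulator U used by Dlaqr5; it + // corresponds to the (3*nshfts-3)×(3*nshfts-3) matrix u documented there.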
+ kdu := 3*ns - 3 + ku := n - kdu + kwh := kdu + kwv = kdu + 3 + nhv = n - kwv - kdu + // Small-bulge multi-shift QR sweep. + impl.Dlaqr5(wantt, wantz, kacc22, n, ktop, kbot, ns, + wr[ks:ks+ns], wi[ks:ks+ns], h, ldh, iloz, ihiz, z, ldz, + work, 3, h[ku*ldh:], ldh, nhv, h[kwv*ldh:], ldh, nhv, h[ku*ldh+kwh:], ldh) + + // Note progress (or the lack of it). + if ld > 0 { + ndfl = 1 + } else { + ndfl++ + } + } + + work[0] = float64(lwkopt) + return unconverged +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go new file mode 100644 index 00000000..493b8e44 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go @@ -0,0 +1,57 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaqr1 sets v to a scalar multiple of the first column of the product +// (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) +// where H is a 2×2 or 3×3 matrix, I is the identity matrix of the same size, +// and i is the imaginary unit. Scaling is done to avoid overflows and most +// underflows. +// +// n is the order of H and must be either 2 or 3. It must hold that either sr1 = +// sr2 and si1 = -si2, or si1 = si2 = 0. The length of v must be equal to n. If +// any of these conditions is not met, Dlaqr1 will panic. +// +// Dlaqr1 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqr1(n int, h []float64, ldh int, sr1, si1, sr2, si2 float64, v []float64) { + if n != 2 && n != 3 { + panic(badDims) + } + checkMatrix(n, n, h, ldh) + if len(v) != n { + panic(badSlice) + } + if !((sr1 == sr2 && si1 == -si2) || (si1 == 0 && si2 == 0)) { + panic(badShifts) + } + + if n == 2 { + s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) + if s == 0 { + v[0] = 0 + v[1] = 0 + } else { + h21s := h[ldh] / s + v[0] = h21s*h[1] + (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) + v[1] = h21s * (h[0] + h[ldh+1] - sr1 - sr2) + } + return + } + + s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) + math.Abs(h[2*ldh]) + if s == 0 { + v[0] = 0 + v[1] = 0 + v[2] = 0 + } else { + h21s := h[ldh] / s + h31s := h[2*ldh] / s + v[0] = (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) + h[1]*h21s + h[2]*h31s + v[1] = h21s*(h[0]+h[ldh+1]-sr1-sr2) + h[ldh+2]*h31s + v[2] = h31s*(h[0]+h[2*ldh+2]-sr1-sr2) + h21s*h[2*ldh+1] + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go new file mode 100644 index 00000000..24fdf12b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go @@ -0,0 +1,403 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlaqr23 performs the orthogonal similarity transformation of an n×n upper +// Hessenberg matrix to detect and deflate fully converged eigenvalues from a +// trailing principal submatrix using aggressive early deflation [1]. 
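+// +// In aggressive early deflation a trailing nw×nw window of H is reduced to +// Schur form; eigenvalues of the window whose spike entries (see below) are +// negligible can then be deflated without the cost of a full QR sweep.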
+// +// On return, H will be overwritten by a new Hessenberg matrix that is a +// perturbation of an orthogonal similarity transformation of H. It is hoped +// that on output H will have many zero subdiagonal entries. +// +// If wantt is true, the matrix H will be fully updated so that the +// quasi-triangular Schur factor can be computed. If wantt is false, then only +// enough of H will be updated to preserve the eigenvalues. +// +// If wantz is true, the orthogonal similarity transformation will be +// accumulated into Z[iloz:ihiz+1,ktop:kbot+1], otherwise Z is not referenced. +// +// ktop and kbot determine a block [ktop:kbot+1,ktop:kbot+1] along the diagonal +// of H. It must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0, +// and the block must be isolated, that is, it must hold that +// ktop == 0 or H[ktop,ktop-1] == 0, +// kbot == n-1 or H[kbot+1,kbot] == 0, +// otherwise Dlaqr23 will panic. +// +// nw is the deflation window size. It must hold that +// 0 <= nw <= kbot-ktop+1, +// otherwise Dlaqr23 will panic. +// +// iloz and ihiz specify the rows of the n×n matrix Z to which transformations +// will be applied if wantz is true. It must hold that +// 0 <= iloz <= ktop, and kbot <= ihiz < n, +// otherwise Dlaqr23 will panic. +// +// sr and si must have length kbot+1, otherwise Dlaqr23 will panic. +// +// v and ldv represent an nw×nw work matrix. +// t and ldt represent an nw×nh work matrix, and nh must be at least nw. +// wv and ldwv represent an nv×nw work matrix. +// +// work must have length at least lwork and lwork must be at least max(1,2*nw), +// otherwise Dlaqr23 will panic. Larger values of lwork may result in greater +// efficiency. On return, work[0] will contain the optimal value of lwork. +// +// If lwork is -1, instead of performing Dlaqr23, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// recur is the non-negative recursion depth. For recur > 0, Dlaqr23 behaves +// as DLAQR3, for recur == 0 it behaves as DLAQR2. +// +// On return, ns and nd will contain respectively the number of unconverged +// (i.e., approximate) eigenvalues and converged eigenvalues that are stored in +// sr and si. +// +// On return, the real and imaginary parts of approximate eigenvalues that may +// be used for shifts will be stored respectively in sr[kbot-nd-ns+1:kbot-nd+1] +// and si[kbot-nd-ns+1:kbot-nd+1]. +// +// On return, the real and imaginary parts of converged eigenvalues will be +// stored respectively in sr[kbot-nd+1:kbot+1] and si[kbot-nd+1:kbot+1]. +// +// References: +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl 23(4) (2002), pp. 
948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +func (impl Implementation) Dlaqr23(wantt, wantz bool, n, ktop, kbot, nw int, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, sr, si []float64, v []float64, ldv int, nh int, t []float64, ldt int, nv int, wv []float64, ldwv int, work []float64, lwork int, recur int) (ns, nd int) { + switch { + case ktop < 0 || max(0, n-1) < ktop: + panic("lapack: invalid value of ktop") + case kbot < min(ktop, n-1) || n <= kbot: + panic("lapack: invalid value of kbot") + case (nw < 0 || kbot-ktop+1 < nw) && lwork != -1: + panic("lapack: invalid value of nw") + case nh < nw: + panic("lapack: invalid value of nh") + case lwork < max(1, 2*nw) && lwork != -1: + panic(badWork) + case len(work) < lwork: + panic(shortWork) + case recur < 0: + panic("lapack: recur is negative") + } + if wantz { + switch { + case iloz < 0 || ktop < iloz: + panic("lapack: invalid value of iloz") + case ihiz < kbot || n <= ihiz: + panic("lapack: invalid value of ihiz") + } + } + if lwork != -1 { + // Check input slices only if not doing workspace query. + checkMatrix(n, n, h, ldh) + checkMatrix(nw, nw, v, ldv) + checkMatrix(nw, nh, t, ldt) + checkMatrix(nv, nw, wv, ldwv) + if wantz { + checkMatrix(n, n, z, ldz) + } + switch { + case ktop > 0 && h[ktop*ldh+ktop-1] != 0: + panic("lapack: block not isolated") + case kbot+1 < n && h[(kbot+1)*ldh+kbot] != 0: + panic("lapack: block not isolated") + case len(sr) != kbot+1: + panic("lapack: bad length of sr") + case len(si) != kbot+1: + panic("lapack: bad length of si") + } + } + + // Quick return for zero window size. + if nw == 0 { + work[0] = 1 + return 0, 0 + } + + // LAPACK code does not enforce the documented behavior + // nw <= kbot-ktop+1 + // but we do (we panic above). + jw := nw + lwkopt := max(1, 2*nw) + if jw > 2 { + // Workspace query call to Dgehrd. + impl.Dgehrd(jw, 0, jw-2, nil, 0, nil, work, -1) + lwk1 := int(work[0]) + // Workspace query call to Dormhr. + impl.Dormhr(blas.Right, blas.NoTrans, jw, jw, 0, jw-2, nil, 0, nil, nil, 0, work, -1) + lwk2 := int(work[0]) + if recur > 0 { + // Workspace query call to Dlaqr04. + impl.Dlaqr04(true, true, jw, 0, jw-1, nil, 0, nil, nil, 0, jw-1, nil, 0, work, -1, recur-1) + lwk3 := int(work[0]) + // Optimal workspace. + lwkopt = max(jw+max(lwk1, lwk2), lwk3) + } else { + // Optimal workspace. + lwkopt = jw + max(lwk1, lwk2) + } + } + // Quick return in case of workspace query. + if lwork == -1 { + work[0] = float64(lwkopt) + return 0, 0 + } + + // Machine constants. + ulp := dlamchP + smlnum := float64(n) / ulp * dlamchS + + // Setup deflation window. + var s float64 + kwtop := kbot - jw + 1 + if kwtop != ktop { + s = h[kwtop*ldh+kwtop-1] + } + if kwtop == kbot { + // 1×1 deflation window. + sr[kwtop] = h[kwtop*ldh+kwtop] + si[kwtop] = 0 + ns = 1 + nd = 0 + if math.Abs(s) <= math.Max(smlnum, ulp*math.Abs(h[kwtop*ldh+kwtop])) { + ns = 0 + nd = 1 + if kwtop > ktop { + h[kwtop*ldh+kwtop-1] = 0 + } + } + work[0] = 1 + return ns, nd + } + + // Convert to spike-triangular form. In case of a rare QR failure, this + // routine continues to do aggressive early deflation using that part of + // the deflation window that converged using infqr here and there to + // keep track. 
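+ // + // On success the window satisfies H[kwtop:kbot+1, kwtop:kbot+1] = V*T*V^T, + // and the only coupling of the window to the rest of H is through the + // spike vector s*V[0,:], whose entries are tested for deflatability below.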
+ impl.Dlacpy(blas.Upper, jw, jw, h[kwtop*ldh+kwtop:], ldh, t, ldt) + bi := blas64.Implementation() + bi.Dcopy(jw-1, h[(kwtop+1)*ldh+kwtop:], ldh+1, t[ldt:], ldt+1) + impl.Dlaset(blas.All, jw, jw, 0, 1, v, ldv) + nmin := impl.Ilaenv(12, "DLAQR3", "SV", jw, 0, jw-1, lwork) + var infqr int + if recur > 0 && jw > nmin { + infqr = impl.Dlaqr04(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv, work, lwork, recur-1) + } else { + infqr = impl.Dlahqr(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv) + } + // Note that ilo == 0 which conveniently coincides with the success + // value of infqr, that is, infqr as an index always points to the first + // converged eigenvalue. + + // Dtrexc needs a clean margin near the diagonal. + for j := 0; j < jw-3; j++ { + t[(j+2)*ldt+j] = 0 + t[(j+3)*ldt+j] = 0 + } + if jw >= 3 { + t[(jw-1)*ldt+jw-3] = 0 + } + + ns = jw + ilst := infqr + // Deflation detection loop. + for ilst < ns { + bulge := false + if ns >= 2 { + bulge = t[(ns-1)*ldt+ns-2] != 0 + } + if !bulge { + // Real eigenvalue. + abst := math.Abs(t[(ns-1)*ldt+ns-1]) + if abst == 0 { + abst = math.Abs(s) + } + if math.Abs(s*v[ns-1]) <= math.Max(smlnum, ulp*abst) { + // Deflatable. + ns-- + } else { + // Undeflatable, move it up out of the way. + // Dtrexc can not fail in this case. + _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) + ilst++ + } + continue + } + // Complex conjugate pair. + abst := math.Abs(t[(ns-1)*ldt+ns-1]) + math.Sqrt(math.Abs(t[(ns-1)*ldt+ns-2]))*math.Sqrt(math.Abs(t[(ns-2)*ldt+ns-1])) + if abst == 0 { + abst = math.Abs(s) + } + if math.Max(math.Abs(s*v[ns-1]), math.Abs(s*v[ns-2])) <= math.Max(smlnum, ulp*abst) { + // Deflatable. + ns -= 2 + } else { + // Undeflatable, move them up out of the way. + // Dtrexc does the right thing with ilst in case of a + // rare exchange failure. + _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) + ilst += 2 + } + } + + // Return to Hessenberg form. + if ns == 0 { + s = 0 + } + if ns < jw { + // Sorting diagonal blocks of T improves accuracy for graded + // matrices. Bubble sort deals well with exchange failures. + sorted := false + i := ns + for !sorted { + sorted = true + kend := i - 1 + i = infqr + var k int + if i == ns-1 || t[(i+1)*ldt+i] == 0 { + k = i + 1 + } else { + k = i + 2 + } + for k <= kend { + var evi float64 + if k == i+1 { + evi = math.Abs(t[i*ldt+i]) + } else { + evi = math.Abs(t[i*ldt+i]) + math.Sqrt(math.Abs(t[(i+1)*ldt+i]))*math.Sqrt(math.Abs(t[i*ldt+i+1])) + } + + var evk float64 + if k == kend || t[(k+1)*ldt+k] == 0 { + evk = math.Abs(t[k*ldt+k]) + } else { + evk = math.Abs(t[k*ldt+k]) + math.Sqrt(math.Abs(t[(k+1)*ldt+k]))*math.Sqrt(math.Abs(t[k*ldt+k+1])) + } + + if evi >= evk { + i = k + } else { + sorted = false + _, ilst, ok := impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, i, k, work) + if ok { + i = ilst + } else { + i = k + } + } + if i == kend || t[(i+1)*ldt+i] == 0 { + k = i + 1 + } else { + k = i + 2 + } + } + } + } + + // Restore shift/eigenvalue array from T. + for i := jw - 1; i >= infqr; { + if i == infqr || t[i*ldt+i-1] == 0 { + sr[kwtop+i] = t[i*ldt+i] + si[kwtop+i] = 0 + i-- + continue + } + aa := t[(i-1)*ldt+i-1] + bb := t[(i-1)*ldt+i] + cc := t[i*ldt+i-1] + dd := t[i*ldt+i] + _, _, _, _, sr[kwtop+i-1], si[kwtop+i-1], sr[kwtop+i], si[kwtop+i], _, _ = impl.Dlanv2(aa, bb, cc, dd) + i -= 2 + } + + if ns < jw || s == 0 { + if ns > 1 && s != 0 { + // Reflect spike back into lower triangle. 
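+ // A Householder reflection maps the spike vector s*V[0,0:ns] onto a + // multiple of e_0, after which Dgehrd below restores the window to + // Hessenberg form.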
+ bi.Dcopy(ns, v[:ns], 1, work[:ns], 1) + _, tau := impl.Dlarfg(ns, work[0], work[1:ns], 1) + work[0] = 1 + impl.Dlaset(blas.Lower, jw-2, jw-2, 0, 0, t[2*ldt:], ldt) + impl.Dlarf(blas.Left, ns, jw, work[:ns], 1, tau, t, ldt, work[jw:]) + impl.Dlarf(blas.Right, ns, ns, work[:ns], 1, tau, t, ldt, work[jw:]) + impl.Dlarf(blas.Right, jw, ns, work[:ns], 1, tau, v, ldv, work[jw:]) + impl.Dgehrd(jw, 0, ns-1, t, ldt, work[:jw-1], work[jw:], lwork-jw) + } + + // Copy updated reduced window into place. + if kwtop > 0 { + h[kwtop*ldh+kwtop-1] = s * v[0] + } + impl.Dlacpy(blas.Upper, jw, jw, t, ldt, h[kwtop*ldh+kwtop:], ldh) + bi.Dcopy(jw-1, t[ldt:], ldt+1, h[(kwtop+1)*ldh+kwtop:], ldh+1) + + // Accumulate orthogonal matrix in order to update H and Z, if + // requested. + if ns > 1 && s != 0 { + // work[:ns-1] contains the elementary reflectors stored + // by a call to Dgehrd above. + impl.Dormhr(blas.Right, blas.NoTrans, jw, ns, 0, ns-1, + t, ldt, work[:ns-1], v, ldv, work[jw:], lwork-jw) + } + + // Update vertical slab in H. + var ltop int + if !wantt { + ltop = ktop + } + for krow := ltop; krow < kwtop; krow += nv { + kln := min(nv, kwtop-krow) + bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, + 1, h[krow*ldh+kwtop:], ldh, v, ldv, + 0, wv, ldwv) + impl.Dlacpy(blas.All, kln, jw, wv, ldwv, h[krow*ldh+kwtop:], ldh) + } + + // Update horizontal slab in H. + if wantt { + for kcol := kbot + 1; kcol < n; kcol += nh { + kln := min(nh, n-kcol) + bi.Dgemm(blas.Trans, blas.NoTrans, jw, kln, jw, + 1, v, ldv, h[kwtop*ldh+kcol:], ldh, + 0, t, ldt) + impl.Dlacpy(blas.All, jw, kln, t, ldt, h[kwtop*ldh+kcol:], ldh) + } + } + + // Update vertical slab in Z. + if wantz { + for krow := iloz; krow <= ihiz; krow += nv { + kln := min(nv, ihiz-krow+1) + bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, + 1, z[krow*ldz+kwtop:], ldz, v, ldv, + 0, wv, ldwv) + impl.Dlacpy(blas.All, kln, jw, wv, ldwv, z[krow*ldz+kwtop:], ldz) + } + } + } + + // The number of deflations. + nd = jw - ns + // Shifts are converged eigenvalues that could not be deflated. + // Subtracting infqr from the spike length takes care of the case of a + // rare QR failure while calculating eigenvalues of the deflation + // window. + ns -= infqr + work[0] = float64(lwkopt) + return ns, nd +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go new file mode 100644 index 00000000..48198122 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go @@ -0,0 +1,616 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqr5 performs a single small-bulge multi-shift QR sweep on an isolated +// block of a Hessenberg matrix. +// +// wantt and wantz determine whether the quasi-triangular Schur factor and the +// orthogonal Schur factor, respectively, will be computed. +// +// kacc22 specifies the computation mode of far-from-diagonal orthogonal +// updates. Permitted values are: +// 0: Dlaqr5 will not accumulate reflections and will not use matrix-matrix +// multiply to update far-from-diagonal matrix entries. +// 1: Dlaqr5 will accumulate reflections and use matrix-matrix multiply to +// update far-from-diagonal matrix entries. 
+// 2: Dlaqr5 will accumulate reflections, use matrix-matrix multiply to update +// far-from-diagonal matrix entries, and take advantage of 2×2 block +// structure during matrix multiplies. +// For other values of kacc22 Dlaqr5 will panic. +// +// n is the order of the Hessenberg matrix H. +// +// ktop and kbot are indices of the first and last row and column of an isolated +// diagonal block upon which the QR sweep will be applied. It must hold that +// ktop == 0, or 0 < ktop <= n-1 and H[ktop, ktop-1] == 0, and +// kbot == n-1, or 0 <= kbot < n-1 and H[kbot+1, kbot] == 0, +// otherwise Dlaqr5 will panic. +// +// nshfts is the number of simultaneous shifts. It must be positive and even, +// otherwise Dlaqr5 will panic. +// +// sr and si contain the real and imaginary parts, respectively, of the shifts +// of origin that define the multi-shift QR sweep. On return both slices may be +// reordered by Dlaqr5. Their length must be equal to nshfts, otherwise Dlaqr5 +// will panic. +// +// h and ldh represent the Hessenberg matrix H of size n×n. On return the +// multi-shift QR sweep with shifts sr+i*si has been applied to the isolated +// diagonal block in rows and columns ktop through kbot, inclusive. +// +// iloz and ihiz specify the rows of Z to which transformations will be applied +// if wantz is true. It must hold that 0 <= iloz <= ihiz < n, otherwise Dlaqr5 +// will panic. +// +// z and ldz represent the matrix Z of size n×n. If wantz is true, the QR sweep +// orthogonal similarity transformation is accumulated into +// z[iloz:ihiz,iloz:ihiz] from the right, otherwise z is not referenced. +// +// v and ldv represent an auxiliary matrix V of size (nshfts/2)×3. Note that V +// is transposed with respect to the reference netlib implementation. +// +// u and ldu represent an auxiliary matrix of size (3*nshfts-3)×(3*nshfts-3). +// +// wh and ldwh represent an auxiliary matrix of size (3*nshfts-3)×nh. +// +// wv and ldwv represent an auxiliary matrix of size nv×(3*nshfts-3). +// +// Dlaqr5 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqr5(wantt, wantz bool, kacc22 int, n, ktop, kbot, nshfts int, sr, si []float64, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, v []float64, ldv int, u []float64, ldu int, nv int, wv []float64, ldwv int, nh int, wh []float64, ldwh int) { + checkMatrix(n, n, h, ldh) + if ktop < 0 || n <= ktop { + panic("lapack: invalid value of ktop") + } + if ktop > 0 && h[ktop*ldh+ktop-1] != 0 { + panic("lapack: diagonal block is not isolated") + } + if kbot < 0 || n <= kbot { + panic("lapack: invalid value of kbot") + } + if kbot < n-1 && h[(kbot+1)*ldh+kbot] != 0 { + panic("lapack: diagonal block is not isolated") + } + if nshfts < 0 || nshfts&0x1 != 0 { + panic("lapack: invalid number of shifts") + } + if len(sr) != nshfts || len(si) != nshfts { + panic(badSlice) // TODO(vladimir-ch) Another message? + } + if wantz { + if ihiz >= n { + panic("lapack: invalid value of ihiz") + } + if iloz < 0 || ihiz < iloz { + panic("lapack: invalid value of iloz") + } + checkMatrix(n, n, z, ldz) + } + checkMatrix(nshfts/2, 3, v, ldv) // Transposed w.r.t. lapack. + checkMatrix(3*nshfts-3, 3*nshfts-3, u, ldu) + checkMatrix(nv, 3*nshfts-3, wv, ldwv) + checkMatrix(3*nshfts-3, nh, wh, ldwh) + if kacc22 != 0 && kacc22 != 1 && kacc22 != 2 { + panic("lapack: invalid value of kacc22") + } + + // If there are no shifts, then there is nothing to do. + if nshfts < 2 { + return + } + // If the active block is empty or 1×1, then there is nothing to do.
+ if ktop >= kbot { + return + } + + // Shuffle shifts into pairs of real shifts and pairs of complex + // conjugate shifts assuming complex conjugate shifts are already + // adjacent to one another. + for i := 0; i < nshfts-2; i += 2 { + if si[i] == -si[i+1] { + continue + } + sr[i], sr[i+1], sr[i+2] = sr[i+1], sr[i+2], sr[i] + si[i], si[i+1], si[i+2] = si[i+1], si[i+2], si[i] + } + + // Note: lapack says that nshfts must be even but allows it to be odd + // anyway. We panic above if nshfts is not even, so reducing it by one + // is unnecessary. The only caller Dlaqr04 uses only even nshfts. + // + // The original comment and code from lapack-3.6.0/SRC/dlaqr5.f:341: + // * ==== NSHFTS is supposed to be even, but if it is odd, + // * . then simply reduce it by one. The shuffle above + // * . ensures that the dropped shift is real and that + // * . the remaining shifts are paired. ==== + // * + // NS = NSHFTS - MOD( NSHFTS, 2 ) + ns := nshfts + + safmin := dlamchS + ulp := dlamchP + smlnum := safmin * float64(n) / ulp + + // Use accumulated reflections to update far-from-diagonal entries? + accum := kacc22 == 1 || kacc22 == 2 + // If so, exploit the 2×2 block structure? + blk22 := ns > 2 && kacc22 == 2 + + // Clear trash. + if ktop+2 <= kbot { + h[(ktop+2)*ldh+ktop] = 0 + } + + // nbmps = number of 2-shift bulges in the chain. + nbmps := ns / 2 + + // kdu = width of slab. + kdu := 6*nbmps - 3 + + // Create and chase chains of nbmps bulges. + for incol := 3*(1-nbmps) + ktop - 1; incol <= kbot-2; incol += 3*nbmps - 2 { + ndcol := incol + kdu + if accum { + impl.Dlaset(blas.All, kdu, kdu, 0, 1, u, ldu) + } + + // Near-the-diagonal bulge chase. The following loop performs + // the near-the-diagonal part of a small bulge multi-shift QR + // sweep. Each 6*nbmps-2 column diagonal chunk extends from + // column incol to column ndcol (including both column incol and + // column ndcol). The following loop chases a 3*nbmps column + // long chain of nbmps bulges 3*nbmps-2 columns to the right. + // (incol may be less than ktop and ndcol may be greater than + // kbot indicating phantom columns from which to chase bulges + // before they are actually introduced or to which to chase + // bulges beyond column kbot.) + for krcol := incol; krcol <= min(incol+3*nbmps-3, kbot-2); krcol++ { + // Bulges number mtop to mbot are active double implicit + // shift bulges. There may or may not also be small 2×2 + // bulge, if there is room. The inactive bulges (if any) + // must wait until the active bulges have moved down the + // diagonal to make room. The phantom matrix paradigm + // described above helps keep track. + + mtop := max(0, ((ktop-1)-krcol+2)/3) + mbot := min(nbmps, (kbot-krcol)/3) - 1 + m22 := mbot + 1 + bmp22 := (mbot < nbmps-1) && (krcol+3*m22 == kbot-2) + + // Generate reflections to chase the chain right one + // column. (The minimum value of k is ktop-1.) + for m := mtop; m <= mbot; m++ { + k := krcol + 3*m + if k == ktop-1 { + impl.Dlaqr1(3, h[ktop*ldh+ktop:], ldh, + sr[2*m], si[2*m], sr[2*m+1], si[2*m+1], + v[m*ldv:m*ldv+3]) + alpha := v[m*ldv] + _, v[m*ldv] = impl.Dlarfg(3, alpha, v[m*ldv+1:m*ldv+3], 1) + continue + } + beta := h[(k+1)*ldh+k] + v[m*ldv+1] = h[(k+2)*ldh+k] + v[m*ldv+2] = h[(k+3)*ldh+k] + beta, v[m*ldv] = impl.Dlarfg(3, beta, v[m*ldv+1:m*ldv+3], 1) + + // A bulge may collapse because of vigilant deflation or + // destructive underflow. In the underflow case, try the + // two-small-subdiagonals trick to try to reinflate the + // bulge. 
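+ // (The bulge occupies the 3×3 block whose top-left corner is at H[k+1,k]; + // it has collapsed when H[k+3,k] and H[k+3,k+1] are zero while + // H[k+3,k+2] is not.)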
+ if h[(k+3)*ldh+k] != 0 || h[(k+3)*ldh+k+1] != 0 || h[(k+3)*ldh+k+2] == 0 { + // Typical case: not collapsed (yet). + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + continue + } + + // Atypical case: collapsed. Attempt to reintroduce + // ignoring H[k+1,k] and H[k+2,k]. If the fill + // resulting from the new reflector is too large, + // then abandon it. Otherwise, use the new one. + var vt [3]float64 + impl.Dlaqr1(3, h[(k+1)*ldh+k+1:], ldh, sr[2*m], + si[2*m], sr[2*m+1], si[2*m+1], vt[:]) + alpha := vt[0] + _, vt[0] = impl.Dlarfg(3, alpha, vt[1:3], 1) + refsum := vt[0] * (h[(k+1)*ldh+k] + vt[1]*h[(k+2)*ldh+k]) + + dsum := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) + math.Abs(h[(k+2)*ldh+k+2]) + if math.Abs(h[(k+2)*ldh+k]-refsum*vt[1])+math.Abs(refsum*vt[2]) > ulp*dsum { + // Starting a new bulge here would create + // non-negligible fill. Use the old one with + // trepidation. + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + continue + } else { + // Starting a new bulge here would create + // only negligible fill. Replace the old + // reflector with the new one. + h[(k+1)*ldh+k] -= refsum + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + v[m*ldv] = vt[0] + v[m*ldv+1] = vt[1] + v[m*ldv+2] = vt[2] + } + } + + // Generate a 2×2 reflection, if needed. + if bmp22 { + k := krcol + 3*m22 + if k == ktop-1 { + impl.Dlaqr1(2, h[(k+1)*ldh+k+1:], ldh, + sr[2*m22], si[2*m22], sr[2*m22+1], si[2*m22+1], + v[m22*ldv:m22*ldv+2]) + beta := v[m22*ldv] + _, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) + } else { + beta := h[(k+1)*ldh+k] + v[m22*ldv+1] = h[(k+2)*ldh+k] + beta, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + } + } + + // Multiply H by reflections from the left. + var jbot int + switch { + case accum: + jbot = min(ndcol, kbot) + case wantt: + jbot = n - 1 + default: + jbot = kbot + } + for j := max(ktop, krcol); j <= jbot; j++ { + mend := min(mbot+1, (j-krcol+2)/3) - 1 + for m := mtop; m <= mend; m++ { + k := krcol + 3*m + refsum := v[m*ldv] * (h[(k+1)*ldh+j] + + v[m*ldv+1]*h[(k+2)*ldh+j] + v[m*ldv+2]*h[(k+3)*ldh+j]) + h[(k+1)*ldh+j] -= refsum + h[(k+2)*ldh+j] -= refsum * v[m*ldv+1] + h[(k+3)*ldh+j] -= refsum * v[m*ldv+2] + } + } + if bmp22 { + k := krcol + 3*m22 + for j := max(k+1, ktop); j <= jbot; j++ { + refsum := v[m22*ldv] * (h[(k+1)*ldh+j] + v[m22*ldv+1]*h[(k+2)*ldh+j]) + h[(k+1)*ldh+j] -= refsum + h[(k+2)*ldh+j] -= refsum * v[m22*ldv+1] + } + } + + // Multiply H by reflections from the right. Delay filling in the last row + // until the vigilant deflation check is complete. + var jtop int + switch { + case accum: + jtop = max(ktop, incol) + case wantt: + jtop = 0 + default: + jtop = ktop + } + for m := mtop; m <= mbot; m++ { + if v[m*ldv] == 0 { + continue + } + k := krcol + 3*m + for j := jtop; j <= min(kbot, k+3); j++ { + refsum := v[m*ldv] * (h[j*ldh+k+1] + + v[m*ldv+1]*h[j*ldh+k+2] + v[m*ldv+2]*h[j*ldh+k+3]) + h[j*ldh+k+1] -= refsum + h[j*ldh+k+2] -= refsum * v[m*ldv+1] + h[j*ldh+k+3] -= refsum * v[m*ldv+2] + } + if accum { + // Accumulate U. (If necessary, update Z later with with an + // efficient matrix-matrix multiply.) 
+ kms := k - incol + for j := max(0, ktop-incol-1); j < kdu; j++ { + refsum := v[m*ldv] * (u[j*ldu+kms] + + v[m*ldv+1]*u[j*ldu+kms+1] + v[m*ldv+2]*u[j*ldu+kms+2]) + u[j*ldu+kms] -= refsum + u[j*ldu+kms+1] -= refsum * v[m*ldv+1] + u[j*ldu+kms+2] -= refsum * v[m*ldv+2] + } + } else if wantz { + // U is not accumulated, so update Z now by multiplying by + // reflections from the right. + for j := iloz; j <= ihiz; j++ { + refsum := v[m*ldv] * (z[j*ldz+k+1] + + v[m*ldv+1]*z[j*ldz+k+2] + v[m*ldv+2]*z[j*ldz+k+3]) + z[j*ldz+k+1] -= refsum + z[j*ldz+k+2] -= refsum * v[m*ldv+1] + z[j*ldz+k+3] -= refsum * v[m*ldv+2] + } + } + } + + // Special case: 2×2 reflection (if needed). + if bmp22 && v[m22*ldv] != 0 { + k := krcol + 3*m22 + for j := jtop; j <= min(kbot, k+3); j++ { + refsum := v[m22*ldv] * (h[j*ldh+k+1] + v[m22*ldv+1]*h[j*ldh+k+2]) + h[j*ldh+k+1] -= refsum + h[j*ldh+k+2] -= refsum * v[m22*ldv+1] + } + if accum { + kms := k - incol + for j := max(0, ktop-incol-1); j < kdu; j++ { + refsum := v[m22*ldv] * (u[j*ldu+kms] + v[m22*ldv+1]*u[j*ldu+kms+1]) + u[j*ldu+kms] -= refsum + u[j*ldu+kms+1] -= refsum * v[m22*ldv+1] + } + } else if wantz { + for j := iloz; j <= ihiz; j++ { + refsum := v[m22*ldv] * (z[j*ldz+k+1] + v[m22*ldv+1]*z[j*ldz+k+2]) + z[j*ldz+k+1] -= refsum + z[j*ldz+k+2] -= refsum * v[m22*ldv+1] + } + } + } + + // Vigilant deflation check. + mstart := mtop + if krcol+3*mstart < ktop { + mstart++ + } + mend := mbot + if bmp22 { + mend++ + } + if krcol == kbot-2 { + mend++ + } + for m := mstart; m <= mend; m++ { + k := min(kbot-1, krcol+3*m) + + // The following convergence test requires that the tradition + // small-compared-to-nearby-diagonals criterion and the Ahues & + // Tisseur (LAWN 122, 1997) criteria both be satisfied. The latter + // improves accuracy in some examples. Falling back on an alternate + // convergence criterion when tst1 or tst2 is zero (as done here) is + // traditional but probably unnecessary. + + if h[(k+1)*ldh+k] == 0 { + continue + } + tst1 := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) + if tst1 == 0 { + if k >= ktop+1 { + tst1 += math.Abs(h[k*ldh+k-1]) + } + if k >= ktop+2 { + tst1 += math.Abs(h[k*ldh+k-2]) + } + if k >= ktop+3 { + tst1 += math.Abs(h[k*ldh+k-3]) + } + if k <= kbot-2 { + tst1 += math.Abs(h[(k+2)*ldh+k+1]) + } + if k <= kbot-3 { + tst1 += math.Abs(h[(k+3)*ldh+k+1]) + } + if k <= kbot-4 { + tst1 += math.Abs(h[(k+4)*ldh+k+1]) + } + } + if math.Abs(h[(k+1)*ldh+k]) <= math.Max(smlnum, ulp*tst1) { + h12 := math.Max(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) + h21 := math.Min(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) + h11 := math.Max(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) + h22 := math.Min(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) + scl := h11 + h12 + tst2 := h22 * (h11 / scl) + if tst2 == 0 || h21*(h12/scl) <= math.Max(smlnum, ulp*tst2) { + h[(k+1)*ldh+k] = 0 + } + } + } + + // Fill in the last row of each bulge. + mend = min(nbmps, (kbot-krcol-1)/3) - 1 + for m := mtop; m <= mend; m++ { + k := krcol + 3*m + refsum := v[m*ldv] * v[m*ldv+2] * h[(k+4)*ldh+k+3] + h[(k+4)*ldh+k+1] = -refsum + h[(k+4)*ldh+k+2] = -refsum * v[m*ldv+1] + h[(k+4)*ldh+k+3] -= refsum * v[m*ldv+2] + } + } + + // Use U (if accumulated) to update far-from-diagonal entries in H. + // If required, use U to update Z as well. 
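+ // The far-from-diagonal updates below are applied in panels of at most + // nh columns (horizontal multiplies) and nv rows (vertical multiplies) so + // that the wh and wv scratch matrices are always large enough.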
+ if !accum { + continue + } + var jtop, jbot int + if wantt { + jtop = 0 + jbot = n - 1 + } else { + jtop = ktop + jbot = kbot + } + bi := blas64.Implementation() + if !blk22 || incol < ktop || kbot < ndcol || ns <= 2 { + // Updates not exploiting the 2×2 block structure of U. k0 and nu keep track + // of the location and size of U in the special cases of introducing bulges + // and chasing bulges off the bottom. In these special cases and in case the + // number of shifts is ns = 2, there is no 2×2 block structure to exploit. + + k0 := max(0, ktop-incol-1) + nu := kdu - max(0, ndcol-kbot) - k0 + + // Horizontal multiply. + for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { + jlen := min(nh, jbot-jcol+1) + bi.Dgemm(blas.Trans, blas.NoTrans, nu, jlen, nu, + 1, u[k0*ldu+k0:], ldu, + h[(incol+k0+1)*ldh+jcol:], ldh, + 0, wh, ldwh) + impl.Dlacpy(blas.All, nu, jlen, wh, ldwh, h[(incol+k0+1)*ldh+jcol:], ldh) + } + + // Vertical multiply. + for jrow := jtop; jrow <= max(ktop, incol)-1; jrow += nv { + jlen := min(nv, max(ktop, incol)-jrow) + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, + 1, h[jrow*ldh+incol+k0+1:], ldh, + u[k0*ldu+k0:], ldu, + 0, wv, ldwv) + impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, h[jrow*ldh+incol+k0+1:], ldh) + } + + // Z multiply (also vertical). + if wantz { + for jrow := iloz; jrow <= ihiz; jrow += nv { + jlen := min(nv, ihiz-jrow+1) + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, + 1, z[jrow*ldz+incol+k0+1:], ldz, + u[k0*ldu+k0:], ldu, + 0, wv, ldwv) + impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, z[jrow*ldz+incol+k0+1:], ldz) + } + } + + continue + } + + // Updates exploiting U's 2×2 block structure. + + // i2, i4, j2, j4 are the last rows and columns of the blocks. + i2 := (kdu + 1) / 2 + i4 := kdu + j2 := i4 - i2 + j4 := kdu + + // kzs and knz deal with the band of zeros along the diagonal of one of the + // triangular blocks. + kzs := (j4 - j2) - (ns + 1) + knz := ns + 1 + + // Horizontal multiply. + for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { + jlen := min(nh, jbot-jcol+1) + + // Copy bottom of H to top+kzs of scratch (the first kzs + // rows get multiplied by zero). + impl.Dlacpy(blas.All, knz, jlen, h[(incol+1+j2)*ldh+jcol:], ldh, wh[kzs*ldwh:], ldwh) + + // Multiply by U21^T. + impl.Dlaset(blas.All, kzs, jlen, 0, 0, wh, ldwh) + bi.Dtrmm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, knz, jlen, + 1, u[j2*ldu+kzs:], ldu, wh[kzs*ldwh:], ldwh) + + // Multiply top of H by U11^T. + bi.Dgemm(blas.Trans, blas.NoTrans, i2, jlen, j2, + 1, u, ldu, h[(incol+1)*ldh+jcol:], ldh, + 1, wh, ldwh) + + // Copy top of H to bottom of WH. + impl.Dlacpy(blas.All, j2, jlen, h[(incol+1)*ldh+jcol:], ldh, wh[i2*ldwh:], ldwh) + + // Multiply by U21^T. + bi.Dtrmm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, j2, jlen, + 1, u[i2:], ldu, wh[i2*ldwh:], ldwh) + + // Multiply by U22. + bi.Dgemm(blas.Trans, blas.NoTrans, i4-i2, jlen, j4-j2, + 1, u[j2*ldu+i2:], ldu, h[(incol+1+j2)*ldh+jcol:], ldh, + 1, wh[i2*ldwh:], ldwh) + + // Copy it back. + impl.Dlacpy(blas.All, kdu, jlen, wh, ldwh, h[(incol+1)*ldh+jcol:], ldh) + } + + // Vertical multiply. + for jrow := jtop; jrow <= max(incol, ktop)-1; jrow += nv { + jlen := min(nv, max(incol, ktop)-jrow) + + // Copy right of H to scratch (the first kzs columns get multiplied + // by zero). + impl.Dlacpy(blas.All, jlen, knz, h[jrow*ldh+incol+1+j2:], ldh, wv[kzs:], ldwv) + + // Multiply by U21. 
+ impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, + 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) + + // Multiply by U11. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, + 1, h[jrow*ldh+incol+1:], ldh, u, ldu, + 1, wv, ldwv) + + // Copy left of H to right of scratch. + impl.Dlacpy(blas.All, jlen, j2, h[jrow*ldh+incol+1:], ldh, wv[i2:], ldwv) + + // Multiply by U21. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, + 1, u[i2:], ldu, wv[i2:], ldwv) + + // Multiply by U22. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, + 1, h[jrow*ldh+incol+1+j2:], ldh, u[j2*ldu+i2:], ldu, + 1, wv[i2:], ldwv) + + // Copy it back. + impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, h[jrow*ldh+incol+1:], ldh) + } + + if !wantz { + continue + } + // Multiply Z (also vertical). + for jrow := iloz; jrow <= ihiz; jrow += nv { + jlen := min(nv, ihiz-jrow+1) + + // Copy right of Z to left of scratch (first kzs columns get + // multiplied by zero). + impl.Dlacpy(blas.All, jlen, knz, z[jrow*ldz+incol+1+j2:], ldz, wv[kzs:], ldwv) + + // Multiply by U12. + impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, + 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) + + // Multiply by U11. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, + 1, z[jrow*ldz+incol+1:], ldz, u, ldu, + 1, wv, ldwv) + + // Copy left of Z to right of scratch. + impl.Dlacpy(blas.All, jlen, j2, z[jrow*ldz+incol+1:], ldz, wv[i2:], ldwv) + + // Multiply by U21. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, + 1, u[i2:], ldu, wv[i2:], ldwv) + + // Multiply by U22. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, + 1, z[jrow*ldz+incol+1+j2:], ldz, u[j2*ldu+i2:], ldu, + 1, wv[i2:], ldwv) + + // Copy the result back to Z. + impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, z[jrow*ldz+incol+1:], ldz) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go new file mode 100644 index 00000000..5fe24f4a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go @@ -0,0 +1,83 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlarf applies an elementary reflector to a general rectangular matrix c. +// This computes +// c = h * c if side == Left +// c = c * h if side == Right +// where +// h = 1 - tau * v * v^T +// and c is an m×n matrix. +// +// work is temporary storage of length at least n if side == Left and at least +// m if side == Right. This function will panic if this length requirement is not met. +// +// Dlarf is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlarf(side blas.Side, m, n int, v []float64, incv int, tau float64, c []float64, ldc int, work []float64) { + applyleft := side == blas.Left + if (applyleft && len(work) < n) || (!applyleft && len(work) < m) { + panic(badWork) + } + checkMatrix(m, n, c, ldc) + + // v has length m if applyleft and n otherwise.
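+ // The reflector is applied without forming H explicitly: one Dgemv + // computes w = C^T * v (or w = C * v when applying from the right), and + // one Dger applies the rank-1 correction C -= tau * v * w^T (or + // C -= tau * w * v^T), as in the calls at the end of this function.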
+ lenV := n + if applyleft { + lenV = m + } + + checkVector(lenV, v, incv) + + lastv := 0 // last non-zero element of v + lastc := 0 // last non-zero row/column of c + if tau != 0 { + var i int + if applyleft { + lastv = m - 1 + } else { + lastv = n - 1 + } + if incv > 0 { + i = lastv * incv + } + + // Look for the last non-zero row in v. + for lastv >= 0 && v[i] == 0 { + lastv-- + i -= incv + } + if applyleft { + // Scan for the last non-zero column in C[0:lastv, :] + lastc = impl.Iladlc(lastv+1, n, c, ldc) + } else { + // Scan for the last non-zero row in C[:, 0:lastv] + lastc = impl.Iladlr(m, lastv+1, c, ldc) + } + } + if lastv == -1 || lastc == -1 { + return + } + // Sometimes 1-indexing is nicer ... + bi := blas64.Implementation() + if applyleft { + // Form H * C + // w[0:lastc+1] = c[1:lastv+1, 1:lastc+1]^T * v[1:lastv+1,1] + bi.Dgemv(blas.Trans, lastv+1, lastc+1, 1, c, ldc, v, incv, 0, work, 1) + // c[0: lastv, 0: lastc] = c[...] - w[0:lastv, 1] * v[1:lastc, 1]^T + bi.Dger(lastv+1, lastc+1, -tau, v, incv, work, 1, c, ldc) + return + } + // Form C*H + // w[0:lastc+1,1] := c[0:lastc+1,0:lastv+1] * v[0:lastv+1,1] + bi.Dgemv(blas.NoTrans, lastc+1, lastv+1, 1, c, ldc, v, incv, 0, work, 1) + // c[0:lastc+1,0:lastv+1] = c[...] - w[0:lastc+1,0] * v[0:lastv+1,0]^T + bi.Dger(lastc+1, lastv+1, -tau, work, 1, v, incv, c, ldc) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go new file mode 100644 index 00000000..3de2684b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go @@ -0,0 +1,431 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlarfb applies a block reflector to a matrix. +// +// In the call to Dlarfb, the mxn c is multiplied by the implicitly defined matrix h as follows: +// c = h * c if side == Left and trans == NoTrans +// c = c * h if side == Right and trans == NoTrans +// c = h^T * c if side == Left and trans == Trans +// c = c * h^T if side == Right and trans == Trans +// h is a product of elementary reflectors. direct sets the direction of multiplication +// h = h_1 * h_2 * ... * h_k if direct == Forward +// h = h_k * h_k-1 * ... * h_1 if direct == Backward +// The combination of direct and store defines the orientation of the elementary +// reflectors. In all cases the ones on the diagonal are implicitly represented. +// +// If direct == lapack.Forward and store == lapack.ColumnWise +// V = [ 1 ] +// [v1 1 ] +// [v1 v2 1] +// [v1 v2 v3] +// [v1 v2 v3] +// If direct == lapack.Forward and store == lapack.RowWise +// V = [ 1 v1 v1 v1 v1] +// [ 1 v2 v2 v2] +// [ 1 v3 v3] +// If direct == lapack.Backward and store == lapack.ColumnWise +// V = [v1 v2 v3] +// [v1 v2 v3] +// [ 1 v2 v3] +// [ 1 v3] +// [ 1] +// If direct == lapack.Backward and store == lapack.RowWise +// V = [v1 v1 1 ] +// [v2 v2 v2 1 ] +// [v3 v3 v3 v3 1] +// An elementary reflector can be explicitly constructed by extracting the +// corresponding elements of v, placing a 1 where the diagonal would be, and +// placing zeros in the remaining elements. +// +// t is a k×k matrix containing the block reflector, and this function will panic +// if t is not of sufficient size. See Dlarft for more information. 
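+// +// Together v and t define the block reflector in compact WY form; for +// store == lapack.ColumnWise and direct == lapack.Forward this means +// H = I - V * T * V^T, +// the product of the individual reflectors H_1 * H_2 * ... * H_k.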
+// +// work is a temporary storage matrix with stride ldwork. +// work must be of size at least n×k if side == Left and m×k if side == Right, and +// this function will panic if this size is not met. +// +// Dlarfb is an internal routine. It is exported for testing purposes. +func (Implementation) Dlarfb(side blas.Side, trans blas.Transpose, direct lapack.Direct, store lapack.StoreV, m, n, k int, v []float64, ldv int, t []float64, ldt int, c []float64, ldc int, work []float64, ldwork int) { + if side != blas.Left && side != blas.Right { + panic(badSide) + } + if trans != blas.Trans && trans != blas.NoTrans { + panic(badTrans) + } + if direct != lapack.Forward && direct != lapack.Backward { + panic(badDirect) + } + if store != lapack.ColumnWise && store != lapack.RowWise { + panic(badStore) + } + checkMatrix(m, n, c, ldc) + if k < 0 { + panic(kLT0) + } + checkMatrix(k, k, t, ldt) + nv := m + nw := n + if side == blas.Right { + nv = n + nw = m + } + if store == lapack.ColumnWise { + checkMatrix(nv, k, v, ldv) + } else { + checkMatrix(k, nv, v, ldv) + } + checkMatrix(nw, k, work, ldwork) + + if m == 0 || n == 0 { + return + } + + bi := blas64.Implementation() + + transt := blas.Trans + if trans == blas.Trans { + transt = blas.NoTrans + } + // TODO(btracey): This follows the original Lapack code where the + // elements are copied into the columns of the working array. The + // loops should go in the other direction so the data is written + // into the rows of work so the copy is not strided. A bigger change + // would be to replace work with work^T, but benchmarks would be + // needed to see if the change is merited. + if store == lapack.ColumnWise { + if direct == lapack.Forward { + // V1 is the first k rows of V. V2 is the remaining rows. + if side == blas.Left { + // W = C^T V = C1^T V1 + C2^T V2 (stored in work). + + // W = C1. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) + } + // W = W * V1. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, + n, k, 1, + v, ldv, + work, ldwork) + if m > k { + // W = W + C2^T V2. + bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, + 1, c[k*ldc:], ldc, v[k*ldv:], ldv, + 1, work, ldwork) + } + // W = W * T^T or W * T. + bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V * W^T. + if m > k { + // C2 -= V2 * W^T. + bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, + -1, v[k*ldv:], ldv, work, ldwork, + 1, c[k*ldc:], ldc) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + // C1 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[j*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C = C * H or C * H^T, where C = (C1 C2). + + // W = C1. + for i := 0; i < k; i++ { + bi.Dcopy(m, c[i:], ldc, work[i:], ldwork) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, + 1, c[k:], ldc, v[k*ldv:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, + -1, work, ldwork, v[k*ldv:], ldv, + 1, c[k:], ldc) + } + // C -= W * V^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + // C -= W. + // TODO(btracey): This should use blas.Axpy.
+ for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+j] -= work[i*ldwork+j] + } + } + return + } + // V = (V1) + // = (V2) (last k rows) + // Where V2 is unit upper triangular. + if side == blas.Left { + // Form H * C or + // W = C^T V. + + // W = C2^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, + 1, v[(m-k)*ldv:], ldv, + work, ldwork) + if m > k { + // W += C1^T * V1. + bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V * W^T. + if m > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, + -1, v, ldv, work, ldwork, + 1, c, ldc) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, + 1, v[(m-k)*ldv:], ldv, + work, ldwork) + // C2 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[(m-k+j)*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V. + + // W = C2. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) + } + + // W = W * V2. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, + 1, v[(n-k)*ldv:], ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V^T. + if n > k { + // C1 -= W * V1^T. + bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, + -1, work, ldwork, v, ldv, + 1, c, ldc) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, + 1, v[(n-k)*ldv:], ldv, + work, ldwork) + // C2 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+n-k+j] -= work[i*ldwork+j] + } + } + return + } + // Store = Rowwise. + if direct == lapack.Forward { + // V = (V1 V2) where v1 is unit upper triangular. + if side == blas.Left { + // Form H * C or H^T * C where C = (C1; C2). + // W = C^T * V^T. + + // W = C1^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, + 1, c[k*ldc:], ldc, v[k:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V^T * W^T. + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, + -1, v[k:], ldv, work, ldwork, + 1, c[k*ldc:], ldc) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + // C1 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[j*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V^T. + + // W = C1. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[j:], ldc, work[j:], ldwork) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, + 1, c[k:], ldc, v[k:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. 
+ bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V. + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, + -1, work, ldwork, v[k:], ldv, + 1, c[k:], ldc) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + // C1 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+j] -= work[i*ldwork+j] + } + } + return + } + // V = (V1 V2) where V2 is the last k columns and is lower unit triangular. + if side == blas.Left { + // Form H * C or H^T C where C = (C1 ; C2). + // W = C^T * V^T. + + // W = C2^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, + 1, v[m-k:], ldv, + work, ldwork) + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V^T * W^T. + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, + -1, v, ldv, work, ldwork, + 1, c, ldc) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, k, + 1, v[m-k:], ldv, + work, ldwork) + // C2 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[(m-k+j)*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V^T. + // W = C2. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, + 1, v[n-k:], ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V. + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, + -1, work, ldwork, v, ldv, + 1, c, ldc) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, + 1, v[n-k:], ldv, + work, ldwork) + // C1 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+n-k+j] -= work[i*ldwork+j] + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go new file mode 100644 index 00000000..52a67a46 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go @@ -0,0 +1,62 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlarfg generates an elementary reflector for a Householder matrix. It creates +// a real elementary reflector of order n such that +// H * (alpha) = (beta) +// ( x) ( 0) +// H^T * H = I +// H is represented in the form +// H = 1 - tau * (1; v) * (1 v^T) +// where tau is a real scalar. +// +// On entry, x contains the vector x, on exit it contains v. +// +// Dlarfg is an internal routine. It is exported for testing purposes. 
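+// +// For example (impl being an Implementation), with n = 2, alpha = 4 and +// x = []float64{3}, +//  beta, tau := impl.Dlarfg(2, 4, x, 1) +// returns beta = -5 and tau = 1.8 and overwrites x[0] with 1.0/3, so that +// v = (1, 1/3) and H = I - tau * v * v^T give H * (4, 3)^T = (-5, 0)^T.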
+func (impl Implementation) Dlarfg(n int, alpha float64, x []float64, incX int) (beta, tau float64) { + if n < 0 { + panic(nLT0) + } + if n <= 1 { + return alpha, 0 + } + checkVector(n-1, x, incX) + bi := blas64.Implementation() + xnorm := bi.Dnrm2(n-1, x, incX) + if xnorm == 0 { + return alpha, 0 + } + beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) + safmin := dlamchS / dlamchE + knt := 0 + if math.Abs(beta) < safmin { + // xnorm and beta may be inaccurate, scale x and recompute. + rsafmn := 1 / safmin + for { + knt++ + bi.Dscal(n-1, rsafmn, x, incX) + beta *= rsafmn + alpha *= rsafmn + if math.Abs(beta) >= safmin { + break + } + } + xnorm = bi.Dnrm2(n-1, x, incX) + beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) + } + tau = (beta - alpha) / beta + bi.Dscal(n-1, 1/(alpha-beta), x, incX) + for j := 0; j < knt; j++ { + beta *= safmin + } + return beta, tau +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go new file mode 100644 index 00000000..eaec8132 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go @@ -0,0 +1,150 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlarft forms the triangular factor T of a block reflector H, storing the answer +// in t. +// H = I - V * T * V^T if store == lapack.ColumnWise +// H = I - V^T * T * V if store == lapack.RowWise +// H is defined by a product of the elementary reflectors where +// H = H_0 * H_1 * ... * H_{k-1} if direct == lapack.Forward +// H = H_{k-1} * ... * H_1 * H_0 if direct == lapack.Backward +// +// t is a k×k triangular matrix. t is upper triangular if direct = lapack.Forward +// and lower triangular otherwise. This function will panic if t is not of +// sufficient size. +// +// store describes the storage of the elementary reflectors in v. See +// Dlarfb for a description of layout. +// +// tau contains the scalar factors of the elementary reflectors H_i. +// +// Dlarft is an internal routine. It is exported for testing purposes. +func (Implementation) Dlarft(direct lapack.Direct, store lapack.StoreV, n, k int, + v []float64, ldv int, tau []float64, t []float64, ldt int) { + if n == 0 { + return + } + if n < 0 || k < 0 { + panic(negDimension) + } + if direct != lapack.Forward && direct != lapack.Backward { + panic(badDirect) + } + if store != lapack.RowWise && store != lapack.ColumnWise { + panic(badStore) + } + if len(tau) < k { + panic(badTau) + } + checkMatrix(k, k, t, ldt) + bi := blas64.Implementation() + // TODO(btracey): There are a number of minor obvious loop optimizations here. + // TODO(btracey): It may be possible to rearrange some of the code so that + // index of 1 is more common in the Dgemv. 
+ if direct == lapack.Forward { + prevlastv := n - 1 + for i := 0; i < k; i++ { + prevlastv = max(i, prevlastv) + if tau[i] == 0 { + for j := 0; j <= i; j++ { + t[j*ldt+i] = 0 + } + continue + } + var lastv int + if store == lapack.ColumnWise { + // skip trailing zeros + for lastv = n - 1; lastv >= i+1; lastv-- { + if v[lastv*ldv+i] != 0 { + break + } + } + for j := 0; j < i; j++ { + t[j*ldt+i] = -tau[i] * v[i*ldv+j] + } + j := min(lastv, prevlastv) + bi.Dgemv(blas.Trans, j-i, i, + -tau[i], v[(i+1)*ldv:], ldv, v[(i+1)*ldv+i:], ldv, + 1, t[i:], ldt) + } else { + for lastv = n - 1; lastv >= i+1; lastv-- { + if v[i*ldv+lastv] != 0 { + break + } + } + for j := 0; j < i; j++ { + t[j*ldt+i] = -tau[i] * v[j*ldv+i] + } + j := min(lastv, prevlastv) + bi.Dgemv(blas.NoTrans, i, j-i, + -tau[i], v[i+1:], ldv, v[i*ldv+i+1:], 1, + 1, t[i:], ldt) + } + bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, t, ldt, t[i:], ldt) + t[i*ldt+i] = tau[i] + if i > 1 { + prevlastv = max(prevlastv, lastv) + } else { + prevlastv = lastv + } + } + return + } + prevlastv := 0 + for i := k - 1; i >= 0; i-- { + if tau[i] == 0 { + for j := i; j < k; j++ { + t[j*ldt+i] = 0 + } + continue + } + var lastv int + if i < k-1 { + if store == lapack.ColumnWise { + for lastv = 0; lastv < i; lastv++ { + if v[lastv*ldv+i] != 0 { + break + } + } + for j := i + 1; j < k; j++ { + t[j*ldt+i] = -tau[i] * v[(n-k+i)*ldv+j] + } + j := max(lastv, prevlastv) + bi.Dgemv(blas.Trans, n-k+i-j, k-i-1, + -tau[i], v[j*ldv+i+1:], ldv, v[j*ldv+i:], ldv, + 1, t[(i+1)*ldt+i:], ldt) + } else { + for lastv = 0; lastv < i; lastv++ { + if v[i*ldv+lastv] != 0 { + break + } + } + for j := i + 1; j < k; j++ { + t[j*ldt+i] = -tau[i] * v[j*ldv+n-k+i] + } + j := max(lastv, prevlastv) + bi.Dgemv(blas.NoTrans, k-i-1, n-k+i-j, + -tau[i], v[(i+1)*ldv+j:], ldv, v[i*ldv+j:], 1, + 1, t[(i+1)*ldt+i:], ldt) + } + bi.Dtrmv(blas.Lower, blas.NoTrans, blas.NonUnit, k-i-1, + t[(i+1)*ldt+i+1:], ldt, + t[(i+1)*ldt+i:], ldt) + if i > 0 { + prevlastv = min(prevlastv, lastv) + } else { + prevlastv = lastv + } + } + t[i*ldt+i] = tau[i] + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go new file mode 100644 index 00000000..6b3f905c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go @@ -0,0 +1,535 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlarfx applies an elementary reflector H to a real m×n matrix C, from either +// the left or the right, with loop unrolling when the reflector has order less +// than 11. +// +// H is represented in the form +// H = I - tau * v * v^T, +// where tau is a real scalar and v is a real vector. If tau = 0, then H is +// taken to be the identity matrix. +// +// v must have length equal to m if side == blas.Left, and equal to n if side == +// blas.Right, otherwise Dlarfx will panic. +// +// c and ldc represent the m×n matrix C. On return, C is overwritten by the +// matrix H * C if side == blas.Left, or C * H if side == blas.Right. +// +// work must have length at least n if side == blas.Left, and at least m if side +// == blas.Right, otherwise Dlarfx will panic. work is not referenced if H has +// order < 11. +// +// Dlarfx is an internal routine. It is exported for testing purposes. 
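+// +// Because work is not referenced for orders up to 10, a call such as +//  impl.Dlarfx(blas.Left, 3, n, v, tau, c, ldc, nil) +// with len(v) == 3 is valid: the m = 3 case is handled by the unrolled code +// below and the nil work is never touched.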
+func (impl Implementation) Dlarfx(side blas.Side, m, n int, v []float64, tau float64, c []float64, ldc int, work []float64) { + checkMatrix(m, n, c, ldc) + switch side { + case blas.Left: + checkVector(m, v, 1) + if m > 10 && len(work) < n { + panic(badWork) + } + case blas.Right: + checkVector(n, v, 1) + if n > 10 && len(work) < m { + panic(badWork) + } + default: + panic(badSide) + } + + if tau == 0 { + return + } + + if side == blas.Left { + // Form H * C, where H has order m. + switch m { + default: // Code for general m. + impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) + return + + case 0: // No-op for zero size matrix. + return + + case 1: // Special code for 1×1 Householder matrix. + t0 := 1 - tau*v[0]*v[0] + for j := 0; j < n; j++ { + c[j] *= t0 + } + return + + case 2: // Special code for 2×2 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + } + return + + case 3: // Special code for 3×3 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + } + return + + case 4: // Special code for 4×4 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + } + return + + case 5: // Special code for 5×5 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + } + return + + case 6: // Special code for 6×6 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + } + return + + case 7: // Special code for 7×7 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + } + return + + case 8: // Special code for 8×8 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + } + return + + case 9: // Special code for 9×9 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + c[8*ldc+j] -= sum * t8 + } + return + + case 10: // Special code for 10×10 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + v9 := v[9] + t9 := tau * v9 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] + v9*c[9*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + c[8*ldc+j] -= sum * t8 + c[9*ldc+j] -= sum * t9 + } + return + } + } + + // Form C * H, where H has order n. + switch n { + default: // Code for general n. + impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) + return + + case 0: // No-op for zero size matrix. + return + + case 1: // Special code for 1×1 Householder matrix. + t0 := 1 - tau*v[0]*v[0] + for j := 0; j < m; j++ { + c[j*ldc] *= t0 + } + return + + case 2: // Special code for 2×2 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + } + return + + case 3: // Special code for 3×3 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + } + return + + case 4: // Special code for 4×4 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + } + return + + case 5: // Special code for 5×5 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + } + return + + case 6: // Special code for 6×6 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + v5*cs[5] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + } + return + + case 7: // Special code for 7×7 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + } + return + + case 8: // Special code for 8×8 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + } + return + + case 9: // Special code for 9×9 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + cs[8] -= sum * t8 + } + return + + case 10: // Special code for 10×10 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + v9 := v[9] + t9 := tau * v9 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] + v9*cs[9] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + cs[8] -= sum * t8 + cs[9] -= sum * t9 + } + return + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go new file mode 100644 index 00000000..ad645461 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go @@ -0,0 +1,80 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlartg generates a plane rotation so that +// [ cs sn] * [f] = [r] +// [-sn cs] [g] = [0] +// This is a more accurate version of BLAS drotg, with the other differences that +// if g = 0, then cs = 1 and sn = 0, and if f = 0 and g != 0, then cs = 0 and sn = 1. +// If abs(f) > abs(g), cs will be positive. +// +// Dlartg is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlartg(f, g float64) (cs, sn, r float64) { + safmn2 := math.Pow(dlamchB, math.Trunc(math.Log(dlamchS/dlamchE)/math.Log(dlamchB)/2)) + safmx2 := 1 / safmn2 + if g == 0 { + cs = 1 + sn = 0 + r = f + return cs, sn, r + } + if f == 0 { + cs = 0 + sn = 1 + r = g + return cs, sn, r + } + f1 := f + g1 := g + scale := math.Max(math.Abs(f1), math.Abs(g1)) + if scale >= safmx2 { + var count int + for { + count++ + f1 *= safmn2 + g1 *= safmn2 + scale = math.Max(math.Abs(f1), math.Abs(g1)) + if scale < safmx2 { + break + } + } + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + for i := 0; i < count; i++ { + r *= safmx2 + } + } else if scale <= safmn2 { + var count int + for { + count++ + f1 *= safmx2 + g1 *= safmx2 + scale = math.Max(math.Abs(f1), math.Abs(g1)) + if scale >= safmn2 { + break + } + } + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + for i := 0; i < count; i++ { + r *= safmn2 + } + } else { + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + } + if math.Abs(f) > math.Abs(g) && cs < 0 { + cs *= -1 + sn *= -1 + r *= -1 + } + return cs, sn, r +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go new file mode 100644 index 00000000..9922b4aa --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go @@ -0,0 +1,43 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlas2 computes the singular values of the 2×2 matrix defined by +// [F G] +// [0 H] +// The smaller and larger singular values are returned in that order. +// +// Dlas2 is an internal routine. It is exported for testing purposes. 
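+// +// For example, for the upper triangular matrix +//  [1 1] +//  [0 1] +// Dlas2(1, 1, 1) returns ssmin = (sqrt(5)-1)/2 ≈ 0.618 and +// ssmax = (sqrt(5)+1)/2 ≈ 1.618, a reciprocal pair, as expected for a +// 2×2 matrix with determinant 1.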
+func (impl Implementation) Dlas2(f, g, h float64) (ssmin, ssmax float64) { + fa := math.Abs(f) + ga := math.Abs(g) + ha := math.Abs(h) + fhmin := math.Min(fa, ha) + fhmax := math.Max(fa, ha) + if fhmin == 0 { + if fhmax == 0 { + return 0, ga + } + v := math.Min(fhmax, ga) / math.Max(fhmax, ga) + return 0, math.Max(fhmax, ga) * math.Sqrt(1+v*v) + } + if ga < fhmax { + as := 1 + fhmin/fhmax + at := (fhmax - fhmin) / fhmax + au := (ga / fhmax) * (ga / fhmax) + c := 2 / (math.Sqrt(as*as+au) + math.Sqrt(at*at+au)) + return fhmin * c, fhmax / c + } + au := fhmax / ga + if au == 0 { + return fhmin * fhmax / ga, ga + } + as := 1 + fhmin/fhmax + at := (fhmax - fhmin) / fhmax + c := 1 / (math.Sqrt(1+(as*au)*(as*au)) + math.Sqrt(1+(at*au)*(at*au))) + return 2 * (fhmin * c) * au, ga / (c + c) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go new file mode 100644 index 00000000..51363fe5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go @@ -0,0 +1,89 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlascl multiplies an m×n matrix by the scalar cto/cfrom. +// +// cfrom must not be zero, and cto and cfrom must not be NaN, otherwise Dlascl +// will panic. +// +// Dlascl is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlascl(kind lapack.MatrixType, kl, ku int, cfrom, cto float64, m, n int, a []float64, lda int) { + checkMatrix(m, n, a, lda) + if cfrom == 0 { + panic(zeroDiv) + } + if math.IsNaN(cfrom) || math.IsNaN(cto) { + panic(nanScale) + } + if n == 0 || m == 0 { + return + } + smlnum := dlamchS + bignum := 1 / smlnum + cfromc := cfrom + ctoc := cto + cfrom1 := cfromc * smlnum + for { + var done bool + var mul, ctol float64 + if cfrom1 == cfromc { + // cfromc is inf. + mul = ctoc / cfromc + done = true + ctol = ctoc + } else { + ctol = ctoc / bignum + if ctol == ctoc { + // ctoc is either 0 or inf. + mul = ctoc + done = true + cfromc = 1 + } else if math.Abs(cfrom1) > math.Abs(ctoc) && ctoc != 0 { + mul = smlnum + done = false + cfromc = cfrom1 + } else if math.Abs(ctol) > math.Abs(cfromc) { + mul = bignum + done = false + ctoc = ctol + } else { + mul = ctoc / cfromc + done = true + } + } + switch kind { + default: + panic("lapack: not implemented") + case lapack.General: + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + case lapack.UpperTri: + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + case lapack.LowerTri: + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + } + if done { + break + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go new file mode 100644 index 00000000..3116631e --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go @@ -0,0 +1,40 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlaset sets the off-diagonal elements of A to alpha, and the diagonal +// elements to beta. If uplo == blas.Upper, only the elements in the upper +// triangular part are set. If uplo == blas.Lower, only the elements in the +// lower triangular part are set. If uplo is otherwise, all of the elements of A +// are set. +// +// Dlaset is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaset(uplo blas.Uplo, m, n int, alpha, beta float64, a []float64, lda int) { + checkMatrix(m, n, a, lda) + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + a[i*lda+j] = alpha + } + } + } else if uplo == blas.Lower { + for i := 0; i < m; i++ { + for j := 0; j < min(i+1, n); j++ { + a[i*lda+j] = alpha + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = alpha + } + } + } + for i := 0; i < min(m, n); i++ { + a[i*lda+i] = beta + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go new file mode 100644 index 00000000..4a37cfc5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go @@ -0,0 +1,97 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlasq1 computes the singular values of an n×n bidiagonal matrix with diagonal +// d and off-diagonal e. On exit, d contains the singular values in decreasing +// order, and e is overwritten. d must have length at least n, e must have +// length at least n-1, and the input work must have length at least 4*n. Dlasq1 +// will panic if these conditions are not met. +// +// Dlasq1 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq1(n int, d, e, work []float64) (info int) { + // TODO(btracey): replace info with an error. + if n < 0 { + panic(nLT0) + } + if len(work) < 4*n { + panic(badWork) + } + if len(d) < n { + panic("lapack: length of d less than n") + } + if len(e) < n-1 { + panic("lapack: length of e less than n-1") + } + if n == 0 { + return info + } + if n == 1 { + d[0] = math.Abs(d[0]) + return info + } + if n == 2 { + d[1], d[0] = impl.Dlas2(d[0], e[0], d[1]) + return info + } + // Estimate the largest singular value. + var sigmx float64 + for i := 0; i < n-1; i++ { + d[i] = math.Abs(d[i]) + sigmx = math.Max(sigmx, math.Abs(e[i])) + } + d[n-1] = math.Abs(d[n-1]) + // Early return if sigmx is zero (matrix is already diagonal). + if sigmx == 0 { + impl.Dlasrt(lapack.SortDecreasing, n, d) + return info + } + + for i := 0; i < n; i++ { + sigmx = math.Max(sigmx, d[i]) + } + + // Copy D and E into WORK (in the Z format) and scale (squaring the + // input data makes scaling by a power of the radix pointless). + + eps := dlamchP + safmin := dlamchS + scale := math.Sqrt(eps / safmin) + bi := blas64.Implementation() + bi.Dcopy(n, d, 1, work, 2) + bi.Dcopy(n-1, e, 1, work[1:], 2) + impl.Dlascl(lapack.General, 0, 0, sigmx, scale, 2*n-1, 1, work, 1) + + // Compute the q's and e's. 
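+ // Squaring the scaled entries converts the bidiagonal singular value + // problem into the qd eigenvalue problem handled by Dlasq2; its + // eigenvalues are the squares of the singular values, so square roots + // are taken when copying the results back into d below.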
+ for i := 0; i < 2*n-1; i++ { + work[i] *= work[i] + } + work[2*n-1] = 0 + + info = impl.Dlasq2(n, work) + if info == 0 { + for i := 0; i < n; i++ { + d[i] = math.Sqrt(work[i]) + } + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) + } else if info == 2 { + // Maximum number of iterations exceeded. Move data from work + // into D and E so the calling subroutine can try to finish. + for i := 0; i < n; i++ { + d[i] = math.Sqrt(work[2*i]) + e[i] = math.Sqrt(work[2*i+1]) + } + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, e, 1) + } + return info +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go new file mode 100644 index 00000000..009e506b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go @@ -0,0 +1,368 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlasq2 computes all the eigenvalues of the symmetric positive +// definite tridiagonal matrix associated with the qd array Z. Eigenvalues +// are computed to high relative accuracy avoiding denormalization, underflow +// and overflow. +// +// To see the relation of Z to the tridiagonal matrix, let L be a +// unit lower bidiagonal matrix with sub-diagonals Z(2,4,6,...) and +// let U be an upper bidiagonal matrix with 1's above and diagonal +// Z(1,3,5,...). The tridiagonal is L*U or, if you prefer, the +// symmetric tridiagonal to which it is similar. +// +// info returns a status error. The return codes are as follows: +// 0: The algorithm completed successfully. +// 1: A split was marked by a positive value in e. +// 2: Current block of Z not diagonalized after 100*n iterations (in inner +// while loop). On exit Z holds a qd array with the same eigenvalues as +// the given Z. +// 3: Termination criterion of outer while loop not met (program created more +// than N unreduced blocks). +// +// z must have length at least 4*n, and must not contain any negative elements. +// Dlasq2 will panic otherwise. +// +// Dlasq2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq2(n int, z []float64) (info int) { + // TODO(btracey): make info an error. + if len(z) < 4*n { + panic(badZ) + } + const cbias = 1.5 + + eps := dlamchP + safmin := dlamchS + tol := eps * 100 + tol2 := tol * tol + if n < 0 { + panic(nLT0) + } + if n == 0 { + return info + } + if n == 1 { + if z[0] < 0 { + panic(negZ) + } + return info + } + if n == 2 { + if z[1] < 0 || z[2] < 0 { + panic("lapack: bad z value") + } else if z[2] > z[0] { + z[0], z[2] = z[2], z[0] + } + z[4] = z[0] + z[1] + z[2] + if z[1] > z[2]*tol2 { + t := 0.5 * (z[0] - z[2] + z[1]) + s := z[2] * (z[1] / t) + if s <= t { + s = z[2] * (z[1] / (t * (1 + math.Sqrt(1+s/t)))) + } else { + s = z[2] * (z[1] / (t + math.Sqrt(t)*math.Sqrt(t+s))) + } + t = z[0] + s + z[1] + z[2] *= z[0] / t + z[0] = t + } + z[1] = z[2] + z[5] = z[1] + z[0] + return info + } + // Check for negative data and compute sums of q's and e's.
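+ // As a small illustration of the qd layout, for n = 2 the input + // Z = (q1, e1, q2) corresponds to + // L*U = [q1 1 ] + // [q1*e1 q2+e1] + // whose trace q1+e1+q2 is exactly the value stored in z[4] in the + // n == 2 case above.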
+ z[2*n-1] = 0 + emin := z[1] + var d, e, qmax float64 + var i1, n1 int + for k := 0; k < 2*(n-1); k += 2 { + if z[k] < 0 || z[k+1] < 0 { + panic("lapack: bad z value") + } + d += z[k] + e += z[k+1] + qmax = math.Max(qmax, z[k]) + emin = math.Min(emin, z[k+1]) + } + if z[2*(n-1)] < 0 { + panic("lapack: bad z value") + } + d += z[2*(n-1)] + qmax = math.Max(qmax, z[2*(n-1)]) + // Check for diagonality. + if e == 0 { + for k := 1; k < n; k++ { + z[k] = z[2*k] + } + impl.Dlasrt(lapack.SortDecreasing, n, z) + z[2*(n-1)] = d + return info + } + trace := d + e + // Check for zero data. + if trace == 0 { + z[2*(n-1)] = 0 + return info + } + // Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). + for k := 2 * n; k >= 2; k -= 2 { + z[2*k-1] = 0 + z[2*k-2] = z[k-1] + z[2*k-3] = 0 + z[2*k-4] = z[k-2] + } + i0 := 0 + n0 := n - 1 + + // Reverse the qd-array, if warranted. + // z[4*i0-3] --> z[4*(i0+1)-3-1] --> z[4*i0] + if cbias*z[4*i0] < z[4*n0] { + ipn4Out := 4 * (i0 + n0 + 2) + for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { + i4 := i4loop - 1 + ipn4 := ipn4Out - 1 + z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] + z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] + } + } + + // Initial split checking via dqd and Li's test. + pp := 0 + for k := 0; k < 2; k++ { + d = z[4*n0+pp] + for i4loop := 4*n0 + pp; i4loop >= 4*(i0+1)+pp; i4loop -= 4 { + i4 := i4loop - 1 + if z[i4-1] <= tol2*d { + z[i4-1] = math.Copysign(0, -1) + d = z[i4-3] + } else { + d = z[i4-3] * (d / (d + z[i4-1])) + } + } + // dqd maps Z to ZZ plus Li's test. + emin = z[4*(i0+1)+pp] + d = z[4*i0+pp] + for i4loop := 4*(i0+1) + pp; i4loop <= 4*n0+pp; i4loop += 4 { + i4 := i4loop - 1 + z[i4-2*pp-2] = d + z[i4-1] + if z[i4-1] <= tol2*d { + z[i4-1] = math.Copysign(0, -1) + z[i4-2*pp-2] = d + z[i4-2*pp] = 0 + d = z[i4+1] + } else if safmin*z[i4+1] < z[i4-2*pp-2] && safmin*z[i4-2*pp-2] < z[i4+1] { + tmp := z[i4+1] / z[i4-2*pp-2] + z[i4-2*pp] = z[i4-1] * tmp + d *= tmp + } else { + z[i4-2*pp] = z[i4+1] * (z[i4-1] / z[i4-2*pp-2]) + d = z[i4+1] * (d / z[i4-2*pp-2]) + } + emin = math.Min(emin, z[i4-2*pp]) + } + z[4*(n0+1)-pp-3] = d + + // Now find qmax. + qmax = z[4*(i0+1)-pp-3] + for i4loop := 4*(i0+1) - pp + 2; i4loop <= 4*(n0+1)+pp-2; i4loop += 4 { + i4 := i4loop - 1 + qmax = math.Max(qmax, z[i4]) + } + // Prepare for the next iteration on K. + pp = 1 - pp + } + + // Initialise variables to pass to DLASQ3. + var ttype int + var dmin1, dmin2, dn, dn1, dn2, g, tau float64 + var tempq float64 + iter := 2 + var nFail int + nDiv := 2 * (n0 - i0) + var i4 int +outer: + for iwhila := 1; iwhila <= n+1; iwhila++ { + // Test for completion. + if n0 < 0 { + // Move q's to the front. + for k := 1; k < n; k++ { + z[k] = z[4*k] + } + // Sort and compute sum of eigenvalues. + impl.Dlasrt(lapack.SortDecreasing, n, z) + e = 0 + for k := n - 1; k >= 0; k-- { + e += z[k] + } + // Store trace, sum(eigenvalues) and information on performance. + z[2*n] = trace + z[2*n+1] = e + z[2*n+2] = float64(iter) + z[2*n+3] = float64(nDiv) / float64(n*n) + z[2*n+4] = 100 * float64(nFail) / float64(iter) + return info + } + + // While array unfinished do + // e[n0] holds the value of sigma when submatrix in i0:n0 + // splits from the rest of the array, but is negated. + var desig float64 + var sigma float64 + if n0 != n-1 { + sigma = -z[4*(n0+1)-2] + } + if sigma < 0 { + info = 1 + return info + } + // Find last unreduced submatrix's top index i0, find qmax and + // emin. Find Gershgorin-type bound if Q's much greater than E's. 
+ var emax float64 + if n0 > i0 { + emin = math.Abs(z[4*(n0+1)-6]) + } else { + emin = 0 + } + qmin := z[4*(n0+1)-4] + qmax = qmin + zSmall := false + for i4loop := 4 * (n0 + 1); i4loop >= 8; i4loop -= 4 { + i4 = i4loop - 1 + if z[i4-5] <= 0 { + zSmall = true + break + } + if qmin >= 4*emax { + qmin = math.Min(qmin, z[i4-3]) + emax = math.Max(emax, z[i4-5]) + } + qmax = math.Max(qmax, z[i4-7]+z[i4-5]) + emin = math.Min(emin, z[i4-5]) + } + if !zSmall { + i4 = 3 + } + i0 = (i4+1)/4 - 1 + pp = 0 + if n0-i0 > 1 { + dee := z[4*i0] + deemin := dee + kmin := i0 + for i4loop := 4*(i0+1) + 1; i4loop <= 4*(n0+1)-3; i4loop += 4 { + i4 := i4loop - 1 + dee = z[i4] * (dee / (dee + z[i4-2])) + if dee <= deemin { + deemin = dee + kmin = (i4+4)/4 - 1 + } + } + if (kmin-i0)*2 < n0-kmin && deemin <= 0.5*z[4*n0] { + ipn4Out := 4 * (i0 + n0 + 2) + pp = 2 + for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { + i4 := i4loop - 1 + ipn4 := ipn4Out - 1 + z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] + z[i4-2], z[ipn4-i4-3] = z[ipn4-i4-3], z[i4-2] + z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] + z[i4], z[ipn4-i4-5] = z[ipn4-i4-5], z[i4] + } + } + } + // Put -(initial shift) into DMIN. + dmin := -math.Max(0, qmin-2*math.Sqrt(qmin)*math.Sqrt(emax)) + + // Now i0:n0 is unreduced. + // PP = 0 for ping, PP = 1 for pong. + // PP = 2 indicates that flipping was applied to the Z array and + // and that the tests for deflation upon entry in Dlasq3 + // should not be performed. + nbig := 100 * (n0 - i0 + 1) + for iwhilb := 0; iwhilb < nbig; iwhilb++ { + if i0 > n0 { + continue outer + } + + // While submatrix unfinished take a good dqds step. + i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau = + impl.Dlasq3(i0, n0, z, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau) + + pp = 1 - pp + // When emin is very small check for splits. + if pp == 0 && n0-i0 >= 3 { + if z[4*(n0+1)-1] <= tol2*qmax || z[4*(n0+1)-2] <= tol2*sigma { + splt := i0 - 1 + qmax = z[4*i0] + emin = z[4*(i0+1)-2] + oldemn := z[4*(i0+1)-1] + for i4loop := 4 * (i0 + 1); i4loop <= 4*(n0-2); i4loop += 4 { + i4 := i4loop - 1 + if z[i4] <= tol2*z[i4-3] || z[i4-1] <= tol2*sigma { + z[i4-1] = -sigma + splt = i4 / 4 + qmax = 0 + emin = z[i4+3] + oldemn = z[i4+4] + } else { + qmax = math.Max(qmax, z[i4+1]) + emin = math.Min(emin, z[i4-1]) + oldemn = math.Min(oldemn, z[i4]) + } + } + z[4*(n0+1)-2] = emin + z[4*(n0+1)-1] = oldemn + i0 = splt + 1 + } + } + } + // Maximum number of iterations exceeded, restore the shift + // sigma and place the new d's and e's in a qd array. + // This might need to be done for several blocks. + info = 2 + i1 = i0 + n1 = n0 + for { + tempq = z[4*i0] + z[4*i0] += sigma + for k := i0 + 1; k <= n0; k++ { + tempe := z[4*(k+1)-6] + z[4*(k+1)-6] *= tempq / z[4*(k+1)-8] + tempq = z[4*k] + z[4*k] += sigma + tempe - z[4*(k+1)-6] + } + // Prepare to do this on the previous block if there is one. + if i1 <= 0 { + break + } + n1 = i1 - 1 + for i1 >= 1 && z[4*(i1+1)-6] >= 0 { + i1 -= 1 + } + sigma = -z[4*(n1+1)-2] + } + for k := 0; k < n; k++ { + z[2*k] = z[4*k] + // Only the block 1..N0 is unfinished. The rest of the e's + // must be essentially zero, although sometimes other data + // has been stored in them. 
+ if k < n0 { + z[2*(k+1)-1] = z[4*(k+1)-1] + } else { + z[2*(k+1)] = 0 + } + } + return info + } + info = 3 + return info +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go new file mode 100644 index 00000000..7139ebb5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go @@ -0,0 +1,161 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq3 checks for deflation, computes a shift (tau) and calls dqds. +// In case of failure it changes shifts, and tries again until output +// is positive. +// +// Dlasq3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq3(i0, n0 int, z []float64, pp int, dmin, sigma, desig, qmax float64, nFail, iter, nDiv int, ttype int, dmin1, dmin2, dn, dn1, dn2, g, tau float64) ( + i0Out, n0Out, ppOut int, dminOut, sigmaOut, desigOut, qmaxOut float64, nFailOut, iterOut, nDivOut, ttypeOut int, dmin1Out, dmin2Out, dnOut, dn1Out, dn2Out, gOut, tauOut float64) { + const cbias = 1.5 + + n0in := n0 + eps := dlamchP + tol := eps * 100 + tol2 := tol * tol + var nn int + var t float64 + for { + if n0 < i0 { + return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau + } + if n0 == i0 { + z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma + n0-- + continue + } + nn = 4*(n0+1) + pp - 1 + if n0 != i0+1 { + // Check whether e[n0-1] is negligible, 1 eigenvalue. + if z[nn-5] > tol2*(sigma+z[nn-3]) && z[nn-2*pp-4] > tol2*z[nn-7] { + // Check whether e[n0-2] is negligible, 2 eigenvalues. + if z[nn-9] > tol2*sigma && z[nn-2*pp-8] > tol2*z[nn-11] { + break + } + } else { + z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma + n0-- + continue + } + } + if z[nn-3] > z[nn-7] { + z[nn-3], z[nn-7] = z[nn-7], z[nn-3] + } + t = 0.5 * (z[nn-7] - z[nn-3] + z[nn-5]) + if z[nn-5] > z[nn-3]*tol2 && t != 0 { + s := z[nn-3] * (z[nn-5] / t) + if s <= t { + s = z[nn-3] * (z[nn-5] / (t * (1 + math.Sqrt(1+s/t)))) + } else { + s = z[nn-3] * (z[nn-5] / (t + math.Sqrt(t)*math.Sqrt(t+s))) + } + t = z[nn-7] + (s + z[nn-5]) + z[nn-3] *= z[nn-7] / t + z[nn-7] = t + } + z[4*(n0+1)-8] = z[nn-7] + sigma + z[4*(n0+1)-4] = z[nn-3] + sigma + n0 -= 2 + } + if pp == 2 { + pp = 0 + } + + // Reverse the qd-array, if warranted. + if dmin <= 0 || n0 < n0in { + if cbias*z[4*(i0+1)+pp-4] < z[4*(n0+1)+pp-4] { + ipn4Out := 4 * (i0 + n0 + 2) + for j4loop := 4 * (i0 + 1); j4loop <= 2*((i0+1)+(n0+1)-1); j4loop += 4 { + ipn4 := ipn4Out - 1 + j4 := j4loop - 1 + + z[j4-3], z[ipn4-j4-4] = z[ipn4-j4-4], z[j4-3] + z[j4-2], z[ipn4-j4-3] = z[ipn4-j4-3], z[j4-2] + z[j4-1], z[ipn4-j4-6] = z[ipn4-j4-6], z[j4-1] + z[j4], z[ipn4-j4-5] = z[ipn4-j4-5], z[j4] + } + if n0-i0 <= 4 { + z[4*(n0+1)+pp-2] = z[4*(i0+1)+pp-2] + z[4*(n0+1)-pp-1] = z[4*(i0+1)-pp-1] + } + dmin2 = math.Min(dmin2, z[4*(i0+1)-pp-2]) + z[4*(n0+1)+pp-2] = math.Min(math.Min(z[4*(n0+1)+pp-2], z[4*(i0+1)+pp-2]), z[4*(i0+1)+pp+2]) + z[4*(n0+1)-pp-1] = math.Min(math.Min(z[4*(n0+1)-pp-1], z[4*(i0+1)-pp-1]), z[4*(i0+1)-pp+3]) + qmax = math.Max(math.Max(qmax, z[4*(i0+1)+pp-4]), z[4*(i0+1)+pp]) + dmin = math.Copysign(0, -1) // Fortran code has -zero, but -0 in go is 0 + } + } + + // Choose a shift. 
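+	// Dlasq4 derives tau from the dmin/dn history of the previous transform;
+	// the negative ttype code records which heuristic produced the shift so
+	// the retry loop below can back it off after a failed dqds step.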
+ tau, ttype, g = impl.Dlasq4(i0, n0, z, pp, n0in, dmin, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g) + + // Call dqds until dmin > 0. +loop: + for { + i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq5(i0, n0, z, pp, tau, sigma) + + nDiv += n0 - i0 + 2 + iter++ + switch { + case dmin >= 0 && dmin1 >= 0: + // Success. + goto done + + case dmin < 0 && dmin1 > 0 && z[4*n0-pp-1] < tol*(sigma+dn1) && math.Abs(dn) < tol*sigma: + // Convergence hidden by negative dn. + z[4*n0-pp+1] = 0 + dmin = 0 + goto done + + case dmin < 0: + // Tau too big. Select new Tau and try again. + nFail++ + if ttype < -22 { + // Failed twice. Play it safe. + tau = 0 + } else if dmin1 > 0 { + // Late failure. Gives excellent shift. + tau = (tau + dmin) * (1 - 2*eps) + ttype -= 11 + } else { + // Early failure. Divide by 4. + tau = tau / 4 + ttype -= 12 + } + + case math.IsNaN(dmin): + if tau == 0 { + break loop + } + tau = 0 + + default: + // Possible underflow. Play it safe. + break loop + } + } + + // Risk of underflow. + dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq6(i0, n0, z, pp) + nDiv += n0 - i0 + 2 + iter++ + tau = 0 + +done: + if tau < sigma { + desig += tau + t = sigma + desig + desig -= t - sigma + } else { + t = sigma + tau + desig += sigma - (t - tau) + } + sigma = t + return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go new file mode 100644 index 00000000..1d581a20 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go @@ -0,0 +1,238 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq4 computes an approximation to the smallest eigenvalue using values of d +// from the previous transform. +// i0, n0, and n0in are zero-indexed. +// +// Dlasq4 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq4(i0, n0 int, z []float64, pp int, n0in int, dmin, dmin1, dmin2, dn, dn1, dn2, tau float64, ttype int, g float64) (tauOut float64, ttypeOut int, gOut float64) { + const ( + cnst1 = 0.563 + cnst2 = 1.01 + cnst3 = 1.05 + + cnstthird = 0.333 // TODO(btracey): Fix? + ) + // A negative dmin forces the shift to take that absolute value + // ttype records the type of shift. + if dmin <= 0 { + tau = -dmin + ttype = -1 + return tau, ttype, g + } + nn := 4*(n0+1) + pp - 1 // -1 for zero indexing + s := math.NaN() // Poison s so that failure to take a path below is obvious + if n0in == n0 { + // No eigenvalues deflated. 
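+		// The branches below (shift types -2 through -6) choose the estimate
+		// depending on whether dmin sits at the bottom of the array (dmin ==
+		// dn or dmin == dn1) and on how trustworthy the previous step was.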
+ if dmin == dn || dmin == dn1 { + b1 := math.Sqrt(z[nn-3]) * math.Sqrt(z[nn-5]) + b2 := math.Sqrt(z[nn-7]) * math.Sqrt(z[nn-9]) + a2 := z[nn-7] + z[nn-5] + if dmin == dn && dmin1 == dn1 { + gap2 := dmin2 - a2 - dmin2/4 + var gap1 float64 + if gap2 > 0 && gap2 > b2 { + gap1 = a2 - dn - (b2/gap2)*b2 + } else { + gap1 = a2 - dn - (b1 + b2) + } + if gap1 > 0 && gap1 > b1 { + s = math.Max(dn-(b1/gap1)*b1, 0.5*dmin) + ttype = -2 + } else { + s = 0 + if dn > b1 { + s = dn - b1 + } + if a2 > b1+b2 { + s = math.Min(s, a2-(b1+b2)) + } + s = math.Max(s, cnstthird*dmin) + ttype = -3 + } + } else { + ttype = -4 + s = dmin / 4 + var gam float64 + var np int + if dmin == dn { + gam = dn + a2 = 0 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b2 = z[nn-5] / z[nn-7] + np = nn - 9 + } else { + np = nn - 2*pp + gam = dn1 + if z[np-4] > z[np-2] { + return tau, ttype, g + } + a2 = z[np-4] / z[np-2] + if z[nn-9] > z[nn-11] { + return tau, ttype, g + } + b2 = z[nn-9] / z[nn-11] + np = nn - 13 + } + // Approximate contribution to norm squared from i < nn-1. + a2 += b2 + for i4loop := np + 1; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if b2 == 0 { + break + } + b1 = b2 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b2 *= z[i4] / z[i4-2] + a2 += b2 + if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { + break + } + } + a2 *= cnst3 + // Rayleigh quotient residual bound. + if a2 < cnst1 { + s = gam * (1 - math.Sqrt(a2)) / (1 + a2) + } + } + } else if dmin == dn2 { + ttype = -5 + s = dmin / 4 + // Compute contribution to norm squared from i > nn-2. + np := nn - 2*pp + b1 := z[np-2] + b2 := z[np-6] + gam := dn2 + if z[np-8] > b2 || z[np-4] > b1 { + return tau, ttype, g + } + a2 := (z[np-8] / b2) * (1 + z[np-4]/b1) + // Approximate contribution to norm squared from i < nn-2. + if n0-i0 > 2 { + b2 = z[nn-13] / z[nn-15] + a2 += b2 + for i4loop := (nn + 1) - 17; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if b2 == 0 { + break + } + b1 = b2 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b2 *= z[i4] / z[i4-2] + a2 += b2 + if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { + break + } + } + a2 *= cnst3 + } + if a2 < cnst1 { + s = gam * (1 - math.Sqrt(a2)) / (1 + a2) + } + } else { + // Case 6, no information to guide us. + if ttype == -6 { + g += cnstthird * (1 - g) + } else if ttype == -18 { + g = cnstthird / 4 + } else { + g = 1.0 / 4 + } + s = g * dmin + ttype = -6 + } + } else if n0in == (n0 + 1) { + // One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. + if dmin1 == dn1 && dmin2 == dn2 { + ttype = -7 + s = cnstthird * dmin1 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b1 := z[nn-5] / z[nn-7] + b2 := b1 + if b2 != 0 { + for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + a2 := b1 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b1 *= z[i4] / z[i4-2] + b2 += b1 + if 100*math.Max(b1, a2) < b2 { + break + } + } + } + b2 = math.Sqrt(cnst3 * b2) + a2 := dmin1 / (1 + b2*b2) + gap2 := 0.5*dmin2 - a2 + if gap2 > 0 && gap2 > b2*a2 { + s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) + } else { + s = math.Max(s, a2*(1-cnst2*b2)) + ttype = -8 + } + } else { + s = dmin1 / 4 + if dmin1 == dn1 { + s = 0.5 * dmin1 + } + ttype = -9 + } + } else if n0in == (n0 + 2) { + // Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. 
+ if dmin2 == dn2 && 2*z[nn-5] < z[nn-7] { + ttype = -10 + s = cnstthird * dmin2 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b1 := z[nn-5] / z[nn-7] + b2 := b1 + if b2 != 0 { + for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b1 *= z[i4] / z[i4-2] + b2 += b1 + if 100*b1 < b2 { + break + } + } + } + b2 = math.Sqrt(cnst3 * b2) + a2 := dmin2 / (1 + b2*b2) + gap2 := z[nn-7] + z[nn-9] - math.Sqrt(z[nn-11])*math.Sqrt(z[nn-9]) - a2 + if gap2 > 0 && gap2 > b2*a2 { + s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) + } else { + s = math.Max(s, a2*(1-cnst2*b2)) + } + } else { + s = dmin2 / 4 + ttype = -11 + } + } else if n0in > n0+2 { + // Case 12, more than two eigenvalues deflated. No information. + s = 0 + ttype = -12 + } + tau = s + return tau, ttype, g +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go new file mode 100644 index 00000000..e9e9fbeb --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go @@ -0,0 +1,127 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq5 computes one dqds transform in ping-pong form. +// i0 and n0 are zero-indexed. +// +// Dlasq5 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq5(i0, n0 int, z []float64, pp int, tau, sigma float64) (i0Out, n0Out, ppOut int, tauOut, sigmaOut, dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { + // The lapack function has inputs for ieee and eps, but Go requires ieee so + // these are unnecessary. + if n0-i0-1 <= 0 { + return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 + } + eps := dlamchP + dthresh := eps * (sigma + tau) + if tau < dthresh*0.5 { + tau = 0 + } + var j4 int + var emin float64 + if tau != 0 { + j4 = 4*i0 + pp + emin = z[j4+4] + d := z[j4] - tau + dmin = d + // In the reference there are code paths that actually return this value. + // dmin1 = -z[j4] + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-2] = d + z[j4-1] + tmp := z[j4+1] / z[j4-2] + d = d*tmp - tau + dmin = math.Min(dmin, d) + z[j4] = z[j4-1] * tmp + emin = math.Min(z[j4], emin) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + tmp := z[j4+2] / z[j4-3] + d = d*tmp - tau + dmin = math.Min(dmin, d) + z[j4-1] = z[j4] * tmp + emin = math.Min(z[j4-1], emin) + } + } + // Unroll the last two steps. + dnm2 = d + dmin2 = dmin + j4 = 4*((n0+1)-2) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau + dmin = math.Min(dmin, dnm1) + + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau + dmin = math.Min(dmin, dn) + } else { + // This is the version that sets d's to zero if they are small enough. + j4 = 4*(i0+1) + pp - 4 + emin = z[j4+4] + d := z[j4] - tau + dmin = d + // In the reference there are code paths that actually return this value. 
+ // dmin1 = -z[j4] + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-2] = d + z[j4-1] + tmp := z[j4+1] / z[j4-2] + d = d*tmp - tau + if d < dthresh { + d = 0 + } + dmin = math.Min(dmin, d) + z[j4] = z[j4-1] * tmp + emin = math.Min(z[j4], emin) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + tmp := z[j4+2] / z[j4-3] + d = d*tmp - tau + if d < dthresh { + d = 0 + } + dmin = math.Min(dmin, d) + z[j4-1] = z[j4] * tmp + emin = math.Min(z[j4-1], emin) + } + } + // Unroll the last two steps. + dnm2 = d + dmin2 = dmin + j4 = 4*((n0+1)-2) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau + dmin = math.Min(dmin, dnm1) + + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau + dmin = math.Min(dmin, dn) + } + z[j4+2] = dn + z[4*(n0+1)-pp-1] = emin + return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go new file mode 100644 index 00000000..f12cbf6a --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go @@ -0,0 +1,109 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq6 computes one dqd transform in ping-pong form with protection against +// overflow and underflow. z has length at least 4*(n0+1) and holds the qd array. +// i0 is the zero-based first index. +// n0 is the zero-based last index. +// +// Dlasq6 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq6(i0, n0 int, z []float64, pp int) (dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { + if len(z) < 4*(n0+1) { + panic(badZ) + } + if n0-i0-1 <= 0 { + return dmin, dmin1, dmin2, dn, dnm1, dnm2 + } + safmin := dlamchS + j4 := 4*(i0+1) + pp - 4 // -4 rather than -3 for zero indexing + emin := z[j4+4] + d := z[j4] + dmin = d + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 // Translate back to zero-indexed. + z[j4-2] = d + z[j4-1] + if z[j4-2] == 0 { + z[j4] = 0 + d = z[j4+1] + dmin = d + emin = 0 + } else if safmin*z[j4+1] < z[j4-2] && safmin*z[j4-2] < z[j4+1] { + tmp := z[j4+1] / z[j4-2] + z[j4] = z[j4-1] * tmp + d *= tmp + } else { + z[j4] = z[j4+1] * (z[j4-1] / z[j4-2]) + d = z[j4+1] * (d / z[j4-2]) + } + dmin = math.Min(dmin, d) + emin = math.Min(emin, z[j4]) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + if z[j4-3] == 0 { + z[j4-1] = 0 + d = z[j4+2] + dmin = d + emin = 0 + } else if safmin*z[j4+2] < z[j4-3] && safmin*z[j4-3] < z[j4+2] { + tmp := z[j4+2] / z[j4-3] + z[j4-1] = z[j4] * tmp + d *= tmp + } else { + z[j4-1] = z[j4+2] * (z[j4] / z[j4-3]) + d = z[j4+2] * (d / z[j4-3]) + } + dmin = math.Min(dmin, d) + emin = math.Min(emin, z[j4-1]) + } + } + // Unroll last two steps. 
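+	// The final two dqd steps are peeled out of the loop because they produce
+	// dnm1 and dn, which are returned separately and, unlike interior steps,
+	// do not fold their e values into the running emin.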
+	dnm2 = d
+	dmin2 = dmin
+	j4 = 4*(n0-1) - pp - 1
+	j4p2 := j4 + 2*pp - 1
+	z[j4-2] = dnm2 + z[j4p2]
+	if z[j4-2] == 0 {
+		z[j4] = 0
+		dnm1 = z[j4p2+2]
+		dmin = dnm1
+		emin = 0
+	} else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] {
+		tmp := z[j4p2+2] / z[j4-2]
+		z[j4] = z[j4p2] * tmp
+		dnm1 = dnm2 * tmp
+	} else {
+		z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2])
+		dnm1 = z[j4p2+2] * (dnm2 / z[j4-2])
+	}
+	dmin = math.Min(dmin, dnm1)
+	dmin1 = dmin
+	j4 += 4
+	j4p2 = j4 + 2*pp - 1
+	z[j4-2] = dnm1 + z[j4p2]
+	if z[j4-2] == 0 {
+		z[j4] = 0
+		dn = z[j4p2+2]
+		dmin = dn
+		emin = 0
+	} else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] {
+		tmp := z[j4p2+2] / z[j4-2]
+		z[j4] = z[j4p2] * tmp
+		dn = dnm1 * tmp
+	} else {
+		z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2])
+		dn = z[j4p2+2] * (dnm1 / z[j4-2])
+	}
+	dmin = math.Min(dmin, dn)
+	z[j4+2] = dn
+	z[4*(n0+1)-pp-1] = emin
+	return dmin, dmin1, dmin2, dn, dnm1, dnm2
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go
new file mode 100644
index 00000000..cc9e3910
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go
@@ -0,0 +1,268 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"gonum.org/v1/gonum/blas"
+	"gonum.org/v1/gonum/lapack"
+)
+
+// Dlasr applies a sequence of plane rotations to the m×n matrix A. This series
+// of plane rotations is implicitly represented by a matrix P. P is multiplied
+// with A depending on the value of side -- A = P * A if side == lapack.Left,
+// A = A * P^T if side == lapack.Right.
+//
+// The exact value of P depends on the value of pivot, but in all cases P is
+// implicitly represented by a series of 2×2 rotation matrices. The entries of
+// rotation matrix k are defined by s[k] and c[k]
+//	R(k) = [ c[k] s[k]]
+//	       [-s[k] c[k]]
+// If direct == lapack.Forward, the rotation matrices are applied as
+// P = P(z-1) * ... * P(2) * P(1), while if direct == lapack.Backward they are
+// applied as P = P(1) * P(2) * ... * P(z-1).
+//
+// pivot defines the mapping of the elements in R(k) to P(k).
+// If pivot == lapack.Variable, the rotation is performed for the (k, k+1) plane.
+//	P(k) = [1                     ]
+//	       [ ...                  ]
+//	       [     1                ]
+//	       [       c[k] s[k]      ]
+//	       [      -s[k] c[k]      ]
+//	       [                 1    ]
+//	       [                  ... ]
+//	       [                     1]
+// if pivot == lapack.Top, the rotation is performed for the (1, k+1) plane,
+//	P(k) = [c[k]        s[k]      ]
+//	       [     1                ]
+//	       [      ...             ]
+//	       [          1           ]
+//	       [-s[k]       c[k]      ]
+//	       [                 1    ]
+//	       [                  ... ]
+//	       [                     1]
+// and if pivot == lapack.Bottom, the rotation is performed for the (k, z) plane.
+//	P(k) = [1                     ]
+//	       [ ...                  ]
+//	       [     1                ]
+//	       [       c[k]      s[k] ]
+//	       [            1         ]
+//	       [             ...      ]
+//	       [                 1    ]
+//	       [      -s[k]      c[k] ]
+// s and c have length m - 1 if side == blas.Left, and n - 1 if side == blas.Right.
+//
+// Dlasr is an internal routine. It is exported for testing purposes.
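+//
+// A hedged usage sketch (the 2×3 matrix and the rotation values below are
+// illustrative, not taken from the reference): a single forward rotation
+// applied from the left with pivot == lapack.Variable mixes rows 0 and 1.
+//	var impl Implementation
+//	a := []float64{1, 2, 3, 4, 5, 6} // 2×3, row-major, lda = 3
+//	c := []float64{0.6}              // cosine of the rotation angle
+//	s := []float64{0.8}              // sine of the rotation angle
+//	impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, 2, 3, c, s, a, 3)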
+func (impl Implementation) Dlasr(side blas.Side, pivot lapack.Pivot, direct lapack.Direct, m, n int, c, s, a []float64, lda int) {
+	checkMatrix(m, n, a, lda)
+	if side != blas.Left && side != blas.Right {
+		panic(badSide)
+	}
+	if pivot != lapack.Variable && pivot != lapack.Top && pivot != lapack.Bottom {
+		panic(badPivot)
+	}
+	if direct != lapack.Forward && direct != lapack.Backward {
+		panic(badDirect)
+	}
+	if side == blas.Left {
+		if len(c) < m-1 {
+			panic(badSlice)
+		}
+		if len(s) < m-1 {
+			panic(badSlice)
+		}
+	} else {
+		if len(c) < n-1 {
+			panic(badSlice)
+		}
+		if len(s) < n-1 {
+			panic(badSlice)
+		}
+	}
+	if m == 0 || n == 0 {
+		return
+	}
+	if side == blas.Left {
+		if pivot == lapack.Variable {
+			if direct == lapack.Forward {
+				for j := 0; j < m-1; j++ {
+					ctmp := c[j]
+					stmp := s[j]
+					if ctmp != 1 || stmp != 0 {
+						for i := 0; i < n; i++ {
+							tmp2 := a[j*lda+i]
+							tmp := a[(j+1)*lda+i]
+							a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2
+							a[j*lda+i] = stmp*tmp + ctmp*tmp2
+						}
+					}
+				}
+				return
+			}
+			for j := m - 2; j >= 0; j-- {
+				ctmp := c[j]
+				stmp := s[j]
+				if ctmp != 1 || stmp != 0 {
+					for i := 0; i < n; i++ {
+						tmp2 := a[j*lda+i]
+						tmp := a[(j+1)*lda+i]
+						a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2
+						a[j*lda+i] = stmp*tmp + ctmp*tmp2
+					}
+				}
+			}
+			return
+		} else if pivot == lapack.Top {
+			if direct == lapack.Forward {
+				for j := 1; j < m; j++ {
+					ctmp := c[j-1]
+					stmp := s[j-1]
+					if ctmp != 1 || stmp != 0 {
+						for i := 0; i < n; i++ {
+							tmp := a[j*lda+i]
+							tmp2 := a[i]
+							a[j*lda+i] = ctmp*tmp - stmp*tmp2
+							a[i] = stmp*tmp + ctmp*tmp2
+						}
+					}
+				}
+				return
+			}
+			for j := m - 1; j >= 1; j-- {
+				ctmp := c[j-1]
+				stmp := s[j-1]
+				if ctmp != 1 || stmp != 0 {
+					for i := 0; i < n; i++ {
+						tmp := a[j*lda+i]
+						tmp2 := a[i]
+						a[j*lda+i] = ctmp*tmp - stmp*tmp2
+						a[i] = stmp*tmp + ctmp*tmp2
+					}
+				}
+			}
+			return
+		}
+		if direct == lapack.Forward {
+			for j := 0; j < m-1; j++ {
+				ctmp := c[j]
+				stmp := s[j]
+				if ctmp != 1 || stmp != 0 {
+					for i := 0; i < n; i++ {
+						tmp := a[j*lda+i]
+						tmp2 := a[(m-1)*lda+i]
+						a[j*lda+i] = stmp*tmp2 + ctmp*tmp
+						a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp
+					}
+				}
+			}
+			return
+		}
+		for j := m - 2; j >= 0; j-- {
+			ctmp := c[j]
+			stmp := s[j]
+			if ctmp != 1 || stmp != 0 {
+				for i := 0; i < n; i++ {
+					tmp := a[j*lda+i]
+					tmp2 := a[(m-1)*lda+i]
+					a[j*lda+i] = stmp*tmp2 + ctmp*tmp
+					a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp
+				}
+			}
+		}
+		return
+	}
+	if pivot == lapack.Variable {
+		if direct == lapack.Forward {
+			for j := 0; j < n-1; j++ {
+				ctmp := c[j]
+				stmp := s[j]
+				if ctmp != 1 || stmp != 0 {
+					for i := 0; i < m; i++ {
+						tmp := a[i*lda+j+1]
+						tmp2 := a[i*lda+j]
+						a[i*lda+j+1] = ctmp*tmp - stmp*tmp2
+						a[i*lda+j] = stmp*tmp + ctmp*tmp2
+					}
+				}
+			}
+			return
+		}
+		for j := n - 2; j >= 0; j-- {
+			ctmp := c[j]
+			stmp := s[j]
+			if ctmp != 1 || stmp != 0 {
+				for i := 0; i < m; i++ {
+					tmp := a[i*lda+j+1]
+					tmp2 := a[i*lda+j]
+					a[i*lda+j+1] = ctmp*tmp - stmp*tmp2
+					a[i*lda+j] = stmp*tmp + ctmp*tmp2
+				}
+			}
+		}
+		return
+	} else if pivot == lapack.Top {
+		if direct == lapack.Forward {
+			for j := 1; j < n; j++ {
+				ctmp := c[j-1]
+				stmp := s[j-1]
+				if ctmp != 1 || stmp != 0 {
+					for i := 0; i < m; i++ {
+						tmp := a[i*lda+j]
+						tmp2 := a[i*lda]
+						a[i*lda+j] = ctmp*tmp - stmp*tmp2
+						a[i*lda] = stmp*tmp + ctmp*tmp2
+					}
+				}
+			}
+			return
+		}
+		for j := n - 1; j >= 1; j-- {
+			ctmp := c[j-1]
+			stmp := s[j-1]
+			if ctmp != 1 || stmp != 0 {
+				for i := 0; i < m; i++ {
+					tmp := a[i*lda+j]
+					tmp2 := a[i*lda]
+
a[i*lda+j] = ctmp*tmp - stmp*tmp2 + a[i*lda] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + if direct == lapack.Forward { + for j := 0; j < n-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda+n-1] + a[i*lda+j] = stmp*tmp2 + ctmp*tmp + a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp + } + + } + } + return + } + for j := n - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda+n-1] + a[i*lda+j] = stmp*tmp2 + ctmp*tmp + a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go new file mode 100644 index 00000000..86786cfe --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go @@ -0,0 +1,30 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "sort" + + "gonum.org/v1/gonum/lapack" +) + +// Dlasrt sorts the numbers in the input slice d. If s == lapack.SortIncreasing, +// the elements are sorted in increasing order. If s == lapack.SortDecreasing, +// the elements are sorted in decreasing order. For other values of s Dlasrt +// will panic. +// +// Dlasrt is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasrt(s lapack.Sort, n int, d []float64) { + checkVector(n, d, 1) + d = d[:n] + switch s { + default: + panic(badSort) + case lapack.SortIncreasing: + sort.Float64s(d) + case lapack.SortDecreasing: + sort.Sort(sort.Reverse(sort.Float64Slice(d))) + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go new file mode 100644 index 00000000..5a7f8709 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go @@ -0,0 +1,31 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlassq updates a sum of squares in scaled form. The input parameters scale and +// sumsq represent the current scale and total sum of squares. These values are +// updated with the information in the first n elements of the vector specified +// by x and incX. +// +// Dlassq is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlassq(n int, x []float64, incx int, scale float64, sumsq float64) (scl, smsq float64) { + if n <= 0 { + return scale, sumsq + } + for ix := 0; ix <= (n-1)*incx; ix += incx { + absxi := math.Abs(x[ix]) + if absxi > 0 || math.IsNaN(absxi) { + if scale < absxi { + sumsq = 1 + sumsq*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumsq += (absxi / scale) * (absxi / scale) + } + } + } + return scale, sumsq +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go new file mode 100644 index 00000000..204af193 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go @@ -0,0 +1,115 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import "math"
+
+// Dlasv2 computes the singular value decomposition of a 2×2 matrix.
+//	[ csl snl] [f g] [csr -snr] = [ssmax     0]
+//	[-snl csl] [0 h] [snr  csr] = [    0 ssmin]
+// ssmax is the larger absolute singular value, and ssmin is the smaller absolute
+// singular value. [csl, snl] and [csr, snr] are the left and right singular vectors.
+//
+// Dlasv2 is an internal routine. It is exported for testing purposes.
+func (impl Implementation) Dlasv2(f, g, h float64) (ssmin, ssmax, snr, csr, snl, csl float64) {
+	ft := f
+	fa := math.Abs(ft)
+	ht := h
+	ha := math.Abs(h)
+	// pmax points to the largest element of the matrix in terms of absolute value.
+	// 1 if F, 2 if G, 3 if H.
+	pmax := 1
+	swap := ha > fa
+	if swap {
+		pmax = 3
+		ft, ht = ht, ft
+		fa, ha = ha, fa
+	}
+	gt := g
+	ga := math.Abs(gt)
+	var clt, crt, slt, srt float64
+	if ga == 0 {
+		ssmin = ha
+		ssmax = fa
+		clt = 1
+		crt = 1
+		slt = 0
+		srt = 0
+	} else {
+		gasmall := true
+		if ga > fa {
+			pmax = 2
+			if (fa / ga) < dlamchE {
+				gasmall = false
+				ssmax = ga
+				if ha > 1 {
+					ssmin = fa / (ga / ha)
+				} else {
+					ssmin = (fa / ga) * ha
+				}
+				clt = 1
+				slt = ht / gt
+				srt = 1
+				crt = ft / gt
+			}
+		}
+		if gasmall {
+			d := fa - ha
+			l := d / fa
+			if d == fa { // deal with inf
+				l = 1
+			}
+			m := gt / ft
+			t := 2 - l
+			s := math.Hypot(t, m)
+			var r float64
+			if l == 0 {
+				r = math.Abs(m)
+			} else {
+				r = math.Hypot(l, m)
+			}
+			a := 0.5 * (s + r)
+			ssmin = ha / a
+			ssmax = fa * a
+			if m == 0 {
+				if l == 0 {
+					t = math.Copysign(2, ft) * math.Copysign(1, gt)
+				} else {
+					t = gt/math.Copysign(d, ft) + m/t
+				}
+			} else {
+				t = (m/(s+t) + m/(r+l)) * (1 + a)
+			}
+			l = math.Hypot(t, 2)
+			crt = 2 / l
+			srt = t / l
+			clt = (crt + srt*m) / a
+			slt = (ht / ft) * srt / a
+		}
+	}
+	if swap {
+		csl = srt
+		snl = crt
+		csr = slt
+		snr = clt
+	} else {
+		csl = clt
+		snl = slt
+		csr = crt
+		snr = srt
+	}
+	var tsign float64
+	switch pmax {
+	case 1:
+		tsign = math.Copysign(1, csr) * math.Copysign(1, csl) * math.Copysign(1, f)
+	case 2:
+		tsign = math.Copysign(1, snr) * math.Copysign(1, csl) * math.Copysign(1, g)
+	case 3:
+		tsign = math.Copysign(1, snr) * math.Copysign(1, snl) * math.Copysign(1, h)
+	}
+	ssmax = math.Copysign(ssmax, tsign)
+	ssmin = math.Copysign(ssmin, tsign*math.Copysign(1, f)*math.Copysign(1, h))
+	return ssmin, ssmax, snr, csr, snl, csl
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go
new file mode 100644
index 00000000..c5586e05
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go
@@ -0,0 +1,47 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import "gonum.org/v1/gonum/blas/blas64"
+
+// Dlaswp swaps the rows k1 to k2 of a rectangular matrix A according to the
+// indices in ipiv so that row k is swapped with ipiv[k].
+//
+// n is the number of columns of A and incX is the increment for ipiv. If incX
+// is 1, the swaps are applied from k1 to k2. If incX is -1, the swaps are
+// applied in reverse order from k2 to k1. For other values of incX Dlaswp will
+// panic. ipiv must have length k2+1, otherwise Dlaswp will panic.
+//
+// The indices k1, k2, and the elements of ipiv are zero-based.
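+//
+// For example (the values are illustrative, not from the reference): with
+// n = 2, k1 = 0, k2 = 2, incX = 1 and ipiv = []int{1, 1, 2}, the first swap
+// exchanges rows 0 and 1 and the remaining two swaps are no-ops.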
+// +// Dlaswp is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaswp(n int, a []float64, lda int, k1, k2 int, ipiv []int, incX int) { + switch { + case n < 0: + panic(nLT0) + case k2 < 0: + panic(badK2) + case k1 < 0 || k2 < k1: + panic(badK1) + case len(ipiv) != k2+1: + panic(badIpiv) + case incX != 1 && incX != -1: + panic(absIncNotOne) + } + + if n == 0 { + return + } + bi := blas64.Implementation() + if incX == 1 { + for k := k1; k <= k2; k++ { + bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) + } + return + } + for k := k2; k >= k1; k-- { + bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go new file mode 100644 index 00000000..abfe60e5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go @@ -0,0 +1,290 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlasy2 solves the Sylvester matrix equation where the matrices are of order 1 +// or 2. It computes the unknown n1×n2 matrix X so that +// TL*X + sgn*X*TR = scale*B, if tranl == false and tranr == false, +// TL^T*X + sgn*X*TR = scale*B, if tranl == true and tranr == false, +// TL*X + sgn*X*TR^T = scale*B, if tranl == false and tranr == true, +// TL^T*X + sgn*X*TR^T = scale*B, if tranl == true and tranr == true, +// where TL is n1×n1, TR is n2×n2, B is n1×n2, and 1 <= n1,n2 <= 2. +// +// isgn must be 1 or -1, and n1 and n2 must be 0, 1, or 2, but these conditions +// are not checked. +// +// Dlasy2 returns three values, a scale factor that is chosen less than or equal +// to 1 to prevent the solution overflowing, the infinity norm of the solution, +// and an indicator of success. If ok is false, TL and TR have eigenvalues that +// are too close, so TL or TR is perturbed to get a non-singular equation. +// +// Dlasy2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasy2(tranl, tranr bool, isgn, n1, n2 int, tl []float64, ldtl int, tr []float64, ldtr int, b []float64, ldb int, x []float64, ldx int) (scale, xnorm float64, ok bool) { + // TODO(vladimir-ch): Add input validation checks conditionally skipped + // using the build tag mechanism. + + ok = true + // Quick return if possible. + if n1 == 0 || n2 == 0 { + return scale, xnorm, ok + } + + // Set constants to control overflow. + eps := dlamchP + smlnum := dlamchS / eps + sgn := float64(isgn) + + if n1 == 1 && n2 == 1 { + // 1×1 case: TL11*X + sgn*X*TR11 = B11. + tau1 := tl[0] + sgn*tr[0] + bet := math.Abs(tau1) + if bet <= smlnum { + tau1 = smlnum + bet = smlnum + ok = false + } + scale = 1 + gam := math.Abs(b[0]) + if smlnum*gam > bet { + scale = 1 / gam + } + x[0] = b[0] * scale / tau1 + xnorm = math.Abs(x[0]) + return scale, xnorm, ok + } + + if n1+n2 == 3 { + // 1×2 or 2×1 case. + var ( + smin float64 + tmp [4]float64 // tmp is used as a 2×2 row-major matrix. + btmp [2]float64 + ) + if n1 == 1 && n2 == 2 { + // 1×2 case: TL11*[X11 X12] + sgn*[X11 X12]*op[TR11 TR12] = [B11 B12]. 
+ // [TR21 TR22] + smin = math.Abs(tl[0]) + smin = math.Max(smin, math.Max(math.Abs(tr[0]), math.Abs(tr[1]))) + smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1]))) + smin = math.Max(eps*smin, smlnum) + tmp[0] = tl[0] + sgn*tr[0] + tmp[3] = tl[0] + sgn*tr[ldtr+1] + if tranr { + tmp[1] = sgn * tr[1] + tmp[2] = sgn * tr[ldtr] + } else { + tmp[1] = sgn * tr[ldtr] + tmp[2] = sgn * tr[1] + } + btmp[0] = b[0] + btmp[1] = b[1] + } else { + // 2×1 case: op[TL11 TL12]*[X11] + sgn*[X11]*TR11 = [B11]. + // [TL21 TL22]*[X21] [X21] [B21] + smin = math.Abs(tr[0]) + smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1]))) + smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1]))) + smin = math.Max(eps*smin, smlnum) + tmp[0] = tl[0] + sgn*tr[0] + tmp[3] = tl[ldtl+1] + sgn*tr[0] + if tranl { + tmp[1] = tl[ldtl] + tmp[2] = tl[1] + } else { + tmp[1] = tl[1] + tmp[2] = tl[ldtl] + } + btmp[0] = b[0] + btmp[1] = b[ldb] + } + + // Solve 2×2 system using complete pivoting. + // Set pivots less than smin to smin. + + bi := blas64.Implementation() + ipiv := bi.Idamax(len(tmp), tmp[:], 1) + // Compute the upper triangular matrix [u11 u12]. + // [ 0 u22] + u11 := tmp[ipiv] + if math.Abs(u11) <= smin { + ok = false + u11 = smin + } + locu12 := [4]int{1, 0, 3, 2} // Index in tmp of the element on the same row as the pivot. + u12 := tmp[locu12[ipiv]] + locl21 := [4]int{2, 3, 0, 1} // Index in tmp of the element on the same column as the pivot. + l21 := tmp[locl21[ipiv]] / u11 + locu22 := [4]int{3, 2, 1, 0} // Index in tmp of the remaining element. + u22 := tmp[locu22[ipiv]] - l21*u12 + if math.Abs(u22) <= smin { + ok = false + u22 = smin + } + if ipiv&0x2 != 0 { // true for ipiv equal to 2 and 3. + // The pivot was in the second row, swap the elements of + // the right-hand side. + btmp[0], btmp[1] = btmp[1], btmp[0]-l21*btmp[1] + } else { + btmp[1] -= l21 * btmp[0] + } + scale = 1 + if 2*smlnum*math.Abs(btmp[1]) > math.Abs(u22) || 2*smlnum*math.Abs(btmp[0]) > math.Abs(u11) { + scale = 0.5 / math.Max(math.Abs(btmp[0]), math.Abs(btmp[1])) + btmp[0] *= scale + btmp[1] *= scale + } + // Solve the system [u11 u12] [x21] = [ btmp[0] ]. + // [ 0 u22] [x22] [ btmp[1] ] + x22 := btmp[1] / u22 + x21 := btmp[0]/u11 - (u12/u11)*x22 + if ipiv&0x1 != 0 { // true for ipiv equal to 1 and 3. + // The pivot was in the second column, swap the elements + // of the solution. + x21, x22 = x22, x21 + } + x[0] = x21 + if n1 == 1 { + x[1] = x22 + xnorm = math.Abs(x[0]) + math.Abs(x[1]) + } else { + x[ldx] = x22 + xnorm = math.Max(math.Abs(x[0]), math.Abs(x[ldx])) + } + return scale, xnorm, ok + } + + // 2×2 case: op[TL11 TL12]*[X11 X12] + SGN*[X11 X12]*op[TR11 TR12] = [B11 B12]. + // [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22] + // + // Solve equivalent 4×4 system using complete pivoting. + // Set pivots less than smin to smin. 
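+
+	// Viewed as a hedged summary (this reading is not spelled out in the
+	// reference comments): t and btmp below realize the Kronecker system
+	//	(op(TL) ⊗ I + sgn*(I ⊗ op(TR)^T)) vec(X) = vec(B)
+	// with vec taken row-major over X and B.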
+	smin := math.Max(math.Abs(tr[0]), math.Abs(tr[1]))
+	smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1])))
+	smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1])))
+	smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1])))
+	smin = math.Max(eps*smin, smlnum)
+
+	var t [4][4]float64
+	t[0][0] = tl[0] + sgn*tr[0]
+	t[1][1] = tl[0] + sgn*tr[ldtr+1]
+	t[2][2] = tl[ldtl+1] + sgn*tr[0]
+	t[3][3] = tl[ldtl+1] + sgn*tr[ldtr+1]
+	if tranl {
+		t[0][2] = tl[ldtl]
+		t[1][3] = tl[ldtl]
+		t[2][0] = tl[1]
+		t[3][1] = tl[1]
+	} else {
+		t[0][2] = tl[1]
+		t[1][3] = tl[1]
+		t[2][0] = tl[ldtl]
+		t[3][1] = tl[ldtl]
+	}
+	if tranr {
+		t[0][1] = sgn * tr[1]
+		t[1][0] = sgn * tr[ldtr]
+		t[2][3] = sgn * tr[1]
+		t[3][2] = sgn * tr[ldtr]
+	} else {
+		t[0][1] = sgn * tr[ldtr]
+		t[1][0] = sgn * tr[1]
+		t[2][3] = sgn * tr[ldtr]
+		t[3][2] = sgn * tr[1]
+	}
+
+	var btmp [4]float64
+	btmp[0] = b[0]
+	btmp[1] = b[1]
+	btmp[2] = b[ldb]
+	btmp[3] = b[ldb+1]
+
+	// Perform elimination.
+	var jpiv [4]int // jpiv records any column swaps for pivoting.
+	for i := 0; i < 3; i++ {
+		var (
+			xmax       float64
+			ipsv, jpsv int
+		)
+		for ip := i; ip < 4; ip++ {
+			for jp := i; jp < 4; jp++ {
+				if math.Abs(t[ip][jp]) >= xmax {
+					xmax = math.Abs(t[ip][jp])
+					ipsv = ip
+					jpsv = jp
+				}
+			}
+		}
+		if ipsv != i {
+			// The pivot is not in the top row of the unprocessed
+			// block, swap rows ipsv and i of t and btmp.
+			t[ipsv], t[i] = t[i], t[ipsv]
+			btmp[ipsv], btmp[i] = btmp[i], btmp[ipsv]
+		}
+		if jpsv != i {
+			// The pivot is not in the left column of the
+			// unprocessed block, swap columns jpsv and i of t.
+			for k := 0; k < 4; k++ {
+				t[k][jpsv], t[k][i] = t[k][i], t[k][jpsv]
+			}
+		}
+		jpiv[i] = jpsv
+		if math.Abs(t[i][i]) < smin {
+			ok = false
+			t[i][i] = smin
+		}
+		for k := i + 1; k < 4; k++ {
+			t[k][i] /= t[i][i]
+			btmp[k] -= t[k][i] * btmp[i]
+			for j := i + 1; j < 4; j++ {
+				t[k][j] -= t[k][i] * t[i][j]
+			}
+		}
+	}
+	if math.Abs(t[3][3]) < smin {
+		ok = false
+		t[3][3] = smin
+	}
+	scale = 1
+	if 8*smlnum*math.Abs(btmp[0]) > math.Abs(t[0][0]) ||
+		8*smlnum*math.Abs(btmp[1]) > math.Abs(t[1][1]) ||
+		8*smlnum*math.Abs(btmp[2]) > math.Abs(t[2][2]) ||
+		8*smlnum*math.Abs(btmp[3]) > math.Abs(t[3][3]) {
+
+		maxbtmp := math.Max(math.Abs(btmp[0]), math.Abs(btmp[1]))
+		maxbtmp = math.Max(maxbtmp, math.Max(math.Abs(btmp[2]), math.Abs(btmp[3])))
+		// Note: the constants must be floating point; the untyped integer
+		// quotient 1 / 8 would truncate to zero.
+		scale = (1.0 / 8.0) / maxbtmp
+		btmp[0] *= scale
+		btmp[1] *= scale
+		btmp[2] *= scale
+		btmp[3] *= scale
+	}
+	// Compute the solution of the upper triangular system t * tmp = btmp.
+	var tmp [4]float64
+	for i := 3; i >= 0; i-- {
+		temp := 1 / t[i][i]
+		tmp[i] = btmp[i] * temp
+		for j := i + 1; j < 4; j++ {
+			tmp[i] -= temp * t[i][j] * tmp[j]
+		}
+	}
+	for i := 2; i >= 0; i-- {
+		if jpiv[i] != i {
+			tmp[i], tmp[jpiv[i]] = tmp[jpiv[i]], tmp[i]
+		}
+	}
+	x[0] = tmp[0]
+	x[1] = tmp[1]
+	x[ldx] = tmp[2]
+	x[ldx+1] = tmp[3]
+	xnorm = math.Max(math.Abs(tmp[0])+math.Abs(tmp[1]), math.Abs(tmp[2])+math.Abs(tmp[3]))
+	return scale, xnorm, ok
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go
new file mode 100644
index 00000000..04eba4a0
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go
@@ -0,0 +1,145 @@
+// Copyright ©2016 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlatrd reduces nb rows and columns of a real n×n symmetric matrix A to symmetric +// tridiagonal form. It computes the orthonormal similarity transformation +// Q^T * A * Q +// and returns the matrices V and W to apply to the unreduced part of A. If +// uplo == blas.Upper, the upper triangle is supplied and the last nb rows are +// reduced. If uplo == blas.Lower, the lower triangle is supplied and the first +// nb rows are reduced. +// +// a contains the symmetric matrix on entry with active triangular half specified +// by uplo. On exit, the nb columns have been reduced to tridiagonal form. The +// diagonal contains the diagonal of the reduced matrix, the off-diagonal is +// set to 1, and the remaining elements contain the data to construct Q. +// +// If uplo == blas.Upper, with n = 5 and nb = 2 on exit a is +// [ a a a v4 v5] +// [ a a v4 v5] +// [ a 1 v5] +// [ d 1] +// [ d] +// +// If uplo == blas.Lower, with n = 5 and nb = 2, on exit a is +// [ d ] +// [ 1 d ] +// [v1 1 a ] +// [v1 v2 a a ] +// [v1 v2 a a a] +// +// e contains the superdiagonal elements of the reduced matrix. If uplo == blas.Upper, +// e[n-nb:n-1] contains the last nb columns of the reduced matrix, while if +// uplo == blas.Lower, e[:nb] contains the first nb columns of the reduced matrix. +// e must have length at least n-1, and Dlatrd will panic otherwise. +// +// tau contains the scalar factors of the elementary reflectors needed to construct Q. +// The reflectors are stored in tau[n-nb:n-1] if uplo == blas.Upper, and in +// tau[:nb] if uplo == blas.Lower. tau must have length n-1, and Dlatrd will panic +// otherwise. +// +// w is an n×nb matrix. On exit it contains the data to update the unreduced part +// of A. +// +// The matrix Q is represented as a product of elementary reflectors. Each reflector +// H has the form +// I - tau * v * v^T +// If uplo == blas.Upper, +// Q = H_{n-1} * H_{n-2} * ... * H_{n-nb} +// where v[:i-1] is stored in A[:i-1,i], v[i-1] = 1, and v[i:n] = 0. +// +// If uplo == blas.Lower, +// Q = H_0 * H_1 * ... * H_{nb-1} +// where v[:i+1] = 0, v[i+1] = 1, and v[i+2:n] is stored in A[i+2:n,i]. +// +// The vectors v form the n×nb matrix V which is used with W to apply a +// symmetric rank-2 update to the unreduced part of A +// A = A - V * W^T - W * V^T +// +// Dlatrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlatrd(uplo blas.Uplo, n, nb int, a []float64, lda int, e, tau, w []float64, ldw int) { + checkMatrix(n, n, a, lda) + checkMatrix(n, nb, w, ldw) + if len(e) < n-1 { + panic(badE) + } + if len(tau) < n-1 { + panic(badTau) + } + if n <= 0 { + return + } + bi := blas64.Implementation() + if uplo == blas.Upper { + for i := n - 1; i >= n-nb; i-- { + iw := i - n + nb + if i < n-1 { + // Update A(0:i, i). + bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, a[i+1:], lda, + w[i*ldw+iw+1:], 1, 1, a[i:], lda) + bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, w[iw+1:], ldw, + a[i*lda+i+1:], 1, 1, a[i:], lda) + } + if i > 0 { + // Generate elementary reflector H_i to annihilate A(0:i-2,i). + e[i-1], tau[i-1] = impl.Dlarfg(i, a[(i-1)*lda+i], a[i:], lda) + a[(i-1)*lda+i] = 1 + + // Compute W(0:i-1, i). 
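+				// The calls below form w_i = tau_i*(A - V*W^T - W*V^T)*v_i and
+				// then subtract 0.5*tau_i*(w_i^T*v_i)*v_i, keeping the implicit
+				// update A - V*W^T - W*V^T symmetric. (This is a summary of the
+				// blocked algorithm, not a comment from the reference.)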
+				bi.Dsymv(blas.Upper, i, 1, a, lda, a[i:], lda, 0, w[iw:], ldw)
+				if i < n-1 {
+					bi.Dgemv(blas.Trans, i, n-i-1, 1, w[iw+1:], ldw,
+						a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw)
+					bi.Dgemv(blas.NoTrans, i, n-i-1, -1, a[i+1:], lda,
+						w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw)
+					bi.Dgemv(blas.Trans, i, n-i-1, 1, a[i+1:], lda,
+						a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw)
+					bi.Dgemv(blas.NoTrans, i, n-i-1, -1, w[iw+1:], ldw,
+						w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw)
+				}
+				bi.Dscal(i, tau[i-1], w[iw:], ldw)
+				alpha := -0.5 * tau[i-1] * bi.Ddot(i, w[iw:], ldw, a[i:], lda)
+				bi.Daxpy(i, alpha, a[i:], lda, w[iw:], ldw)
+			}
+		}
+	} else {
+		// Reduce first nb columns of lower triangle.
+		for i := 0; i < nb; i++ {
+			// Update A(i:n, i)
+			bi.Dgemv(blas.NoTrans, n-i, i, -1, a[i*lda:], lda,
+				w[i*ldw:], 1, 1, a[i*lda+i:], lda)
+			bi.Dgemv(blas.NoTrans, n-i, i, -1, w[i*ldw:], ldw,
+				a[i*lda:], 1, 1, a[i*lda+i:], lda)
+			if i < n-1 {
+				// Generate elementary reflector H_i to annihilate A(i+2:n,i).
+				e[i], tau[i] = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda)
+				a[(i+1)*lda+i] = 1
+
+				// Compute W(i+1:n,i).
+				bi.Dsymv(blas.Lower, n-i-1, 1, a[(i+1)*lda+i+1:], lda,
+					a[(i+1)*lda+i:], lda, 0, w[(i+1)*ldw+i:], ldw)
+				bi.Dgemv(blas.Trans, n-i-1, i, 1, w[(i+1)*ldw:], ldw,
+					a[(i+1)*lda+i:], lda, 0, w[i:], ldw)
+				bi.Dgemv(blas.NoTrans, n-i-1, i, -1, a[(i+1)*lda:], lda,
+					w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw)
+				bi.Dgemv(blas.Trans, n-i-1, i, 1, a[(i+1)*lda:], lda,
+					a[(i+1)*lda+i:], lda, 0, w[i:], ldw)
+				bi.Dgemv(blas.NoTrans, n-i-1, i, -1, w[(i+1)*ldw:], ldw,
+					w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw)
+				bi.Dscal(n-i-1, tau[i], w[(i+1)*ldw+i:], ldw)
+				alpha := -0.5 * tau[i] * bi.Ddot(n-i-1, w[(i+1)*ldw+i:], ldw,
+					a[(i+1)*lda+i:], lda)
+				bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda,
+					w[(i+1)*ldw+i:], ldw)
+			}
+		}
+	}
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go
new file mode 100644
index 00000000..f0c94764
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go
@@ -0,0 +1,350 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"math"
+
+	"gonum.org/v1/gonum/blas"
+	"gonum.org/v1/gonum/blas/blas64"
+)
+
+// Dlatrs solves a triangular system of equations scaled to prevent overflow. It
+// solves
+//	A * x = scale * b if trans == blas.NoTrans
+//	A^T * x = scale * b if trans == blas.Trans
+// where scale is set for numerical stability.
+//
+// A is an n×n triangular matrix. On entry, the slice x contains the values of
+// b, and on exit it contains the solution vector x.
+//
+// If normin == true, cnorm is an input and cnorm[j] contains the norm of the off-diagonal
+// part of the j^th column of A. If trans == blas.NoTrans, cnorm[j] must be greater
+// than or equal to the infinity norm, and greater than or equal to the one-norm
+// otherwise. If normin == false, then cnorm is treated as an output, and is set
+// to contain the 1-norm of the off-diagonal part of the j^th column of A.
+//
+// Dlatrs is an internal routine. It is exported for testing purposes.
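+//
+// A hedged usage sketch (the matrix values are illustrative): solving a
+// well-scaled 2×2 upper triangular system and letting Dlatrs fill cnorm.
+//	a := []float64{2, 1, 0, 4} // row-major, lda = 2
+//	x := []float64{1, 1}       // holds b on entry, the solution on exit
+//	cnorm := make([]float64, 2)
+//	scale := impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, false, 2, a, 2, x, cnorm)
+//	// On return A*x = scale*b with 0 < scale <= 1.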
+func (impl Implementation) Dlatrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, normin bool, n int, a []float64, lda int, x []float64, cnorm []float64) (scale float64) { + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if trans != blas.Trans && trans != blas.NoTrans { + panic(badTrans) + } + if diag != blas.Unit && diag != blas.NonUnit { + panic(badDiag) + } + upper := uplo == blas.Upper + noTrans := trans == blas.NoTrans + nonUnit := diag == blas.NonUnit + + if n < 0 { + panic(nLT0) + } + checkMatrix(n, n, a, lda) + checkVector(n, x, 1) + checkVector(n, cnorm, 1) + + if n == 0 { + return 0 + } + smlnum := dlamchS / dlamchP + bignum := 1 / smlnum + scale = 1 + bi := blas64.Implementation() + if !normin { + if upper { + cnorm[0] = 0 + for j := 1; j < n; j++ { + cnorm[j] = bi.Dasum(j, a[j:], lda) + } + } else { + for j := 0; j < n-1; j++ { + cnorm[j] = bi.Dasum(n-j-1, a[(j+1)*lda+j:], lda) + } + cnorm[n-1] = 0 + } + } + // Scale the column norms by tscal if the maximum element in cnorm is greater than bignum. + imax := bi.Idamax(n, cnorm, 1) + tmax := cnorm[imax] + var tscal float64 + if tmax <= bignum { + tscal = 1 + } else { + tscal = 1 / (smlnum * tmax) + bi.Dscal(n, tscal, cnorm, 1) + } + + // Compute a bound on the computed solution vector to see if bi.Dtrsv can be used. + j := bi.Idamax(n, x, 1) + xmax := math.Abs(x[j]) + xbnd := xmax + var grow float64 + var jfirst, jlast, jinc int + if noTrans { + if upper { + jfirst = n - 1 + jlast = -1 + jinc = -1 + } else { + jfirst = 0 + jlast = n + jinc = 1 + } + // Compute the growth in A * x = b. + if tscal != 1 { + grow = 0 + goto Solve + } + if nonUnit { + grow = 1 / math.Max(xbnd, smlnum) + xbnd = grow + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + tjj := math.Abs(a[j*lda+j]) + xbnd = math.Min(xbnd, math.Min(1, tjj)*grow) + if tjj+cnorm[j] >= smlnum { + grow *= tjj / (tjj + cnorm[j]) + } else { + grow = 0 + } + } + grow = xbnd + } else { + grow = math.Min(1, 1/math.Max(xbnd, smlnum)) + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + grow *= 1 / (1 + cnorm[j]) + } + } + } else { + if upper { + jfirst = 0 + jlast = n + jinc = 1 + } else { + jfirst = n - 1 + jlast = -1 + jinc = -1 + } + if tscal != 1 { + grow = 0 + goto Solve + } + if nonUnit { + grow = 1 / (math.Max(xbnd, smlnum)) + xbnd = grow + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + xj := 1 + cnorm[j] + grow = math.Min(grow, xbnd/xj) + tjj := math.Abs(a[j*lda+j]) + if xj > tjj { + xbnd *= tjj / xj + } + } + grow = math.Min(grow, xbnd) + } else { + grow = math.Min(1, 1/math.Max(xbnd, smlnum)) + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + xj := 1 + cnorm[j] + grow /= xj + } + } + } + +Solve: + if grow*tscal > smlnum { + // Use the Level 2 BLAS solve if the reciprocal of the bound on + // elements of X is not too small. + bi.Dtrsv(uplo, trans, diag, n, a, lda, x, 1) + if tscal != 1 { + bi.Dscal(n, 1/tscal, cnorm, 1) + } + return scale + } + + // Use a Level 1 BLAS solve, scaling intermediate results. 
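+	// Whenever an upcoming division or axpy could overflow, x is first
+	// rescaled by a factor rec, and rec is folded into scale so that the
+	// invariant A*x = scale*b continues to hold.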
+ if xmax > bignum { + scale = bignum / xmax + bi.Dscal(n, scale, x, 1) + xmax = bignum + } + if noTrans { + for j := jfirst; j != jlast; j += jinc { + xj := math.Abs(x[j]) + var tjj, tjjs float64 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + if tscal == 1 { + goto Skip1 + } + } + tjj = math.Abs(tjjs) + if tjj > smlnum { + if tjj < 1 { + if xj > tjj*bignum { + rec := 1 / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + x[j] /= tjjs + xj = math.Abs(x[j]) + } else if tjj > 0 { + if xj > tjj*bignum { + rec := (tjj * bignum) / xj + if cnorm[j] > 1 { + rec /= cnorm[j] + } + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + x[j] /= tjjs + xj = math.Abs(x[j]) + } else { + for i := 0; i < n; i++ { + x[i] = 0 + } + x[j] = 1 + xj = 1 + scale = 0 + xmax = 0 + } + Skip1: + if xj > 1 { + rec := 1 / xj + if cnorm[j] > (bignum-xmax)*rec { + rec *= 0.5 + bi.Dscal(n, rec, x, 1) + scale *= rec + } + } else if xj*cnorm[j] > bignum-xmax { + bi.Dscal(n, 0.5, x, 1) + scale *= 0.5 + } + if upper { + if j > 0 { + bi.Daxpy(j, -x[j]*tscal, a[j:], lda, x, 1) + i := bi.Idamax(j, x, 1) + xmax = math.Abs(x[i]) + } + } else { + if j < n-1 { + bi.Daxpy(n-j-1, -x[j]*tscal, a[(j+1)*lda+j:], lda, x[j+1:], 1) + i := j + bi.Idamax(n-j-1, x[j+1:], 1) + xmax = math.Abs(x[i]) + } + } + } + } else { + for j := jfirst; j != jlast; j += jinc { + xj := math.Abs(x[j]) + uscal := tscal + rec := 1 / math.Max(xmax, 1) + var tjjs float64 + if cnorm[j] > (bignum-xj)*rec { + rec *= 0.5 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + } + tjj := math.Abs(tjjs) + if tjj > 1 { + rec = math.Min(1, rec*tjj) + uscal /= tjjs + } + if rec < 1 { + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + var sumj float64 + if uscal == 1 { + if upper { + sumj = bi.Ddot(j, a[j:], lda, x, 1) + } else if j < n-1 { + sumj = bi.Ddot(n-j-1, a[(j+1)*lda+j:], lda, x[j+1:], 1) + } + } else { + if upper { + for i := 0; i < j; i++ { + sumj += (a[i*lda+j] * uscal) * x[i] + } + } else if j < n { + for i := j + 1; i < n; i++ { + sumj += (a[i*lda+j] * uscal) * x[i] + } + } + } + if uscal == tscal { + x[j] -= sumj + xj := math.Abs(x[j]) + var tjjs float64 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + if tscal == 1 { + goto Skip2 + } + } + tjj := math.Abs(tjjs) + if tjj > smlnum { + if tjj < 1 { + if xj > tjj*bignum { + rec = 1 / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + x[j] /= tjjs + } else if tjj > 0 { + if xj > tjj*bignum { + rec = (tjj * bignum) / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + x[j] /= tjjs + } else { + for i := 0; i < n; i++ { + x[i] = 0 + } + x[j] = 1 + scale = 0 + xmax = 0 + } + } else { + x[j] = x[j]/tjjs - sumj + } + Skip2: + xmax = math.Max(xmax, math.Abs(x[j])) + } + } + scale /= tscal + if tscal != 1 { + bi.Dscal(n, 1/tscal, cnorm, 1) + } + return scale +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go new file mode 100644 index 00000000..079650f3 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go @@ -0,0 +1,28 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gonum is a pure-go implementation of the LAPACK API. The LAPACK API defines +// a set of algorithms for advanced matrix operations. 
+//
+// The function definitions and implementations follow that of the netlib reference
+// implementation. See http://www.netlib.org/lapack/explore-html/ for more
+// information, and http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html
+// for more license information.
+//
+// Slice function arguments frequently represent vectors and matrices. The data
+// layout is identical to that found in https://godoc.org/gonum.org/v1/gonum/blas/gonum.
+//
+// Most LAPACK functions are built on top of the routines defined in the BLAS API,
+// and as such the computation time for many LAPACK functions is dominated by
+// BLAS calls. Here, BLAS is accessed through the blas64 package
+// (https://godoc.org/gonum.org/v1/gonum/blas/blas64). In particular, this
+// implies that an external BLAS library will be used if it is registered in
+// blas64.
+//
+// The full LAPACK capability has not been implemented at present. The full
+// API is very large, containing approximately 200 functions for double precision
+// alone. Future additions will be focused on supporting the gonum matrix
+// package (https://godoc.org/github.com/gonum/matrix/mat64), though pull requests
+// with implementations and tests for LAPACK functions are encouraged.
+package gonum
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go
new file mode 100644
index 00000000..32071986
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go
@@ -0,0 +1,65 @@
+// Copyright ©2016 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"gonum.org/v1/gonum/blas"
+	"gonum.org/v1/gonum/blas/blas64"
+)
+
+// Dorg2l generates an m×n matrix Q with orthonormal columns which is defined
+// as the last n columns of a product of k elementary reflectors of order m.
+//	Q = H_{k-1} * ... * H_1 * H_0
+// See Dgeqlf for more information. It must be that m >= n >= k.
+//
+// tau contains the scalar reflectors computed by Dgeqlf. tau must have length
+// at least k, and Dorg2l will panic otherwise.
+//
+// work contains temporary memory, and must have length at least n. Dorg2l will
+// panic otherwise.
+//
+// Dorg2l is an internal routine. It is exported for testing purposes.
+func (impl Implementation) Dorg2l(m, n, k int, a []float64, lda int, tau, work []float64) {
+	checkMatrix(m, n, a, lda)
+	if len(tau) < k {
+		panic(badTau)
+	}
+	if len(work) < n {
+		panic(badWork)
+	}
+	if m < n {
+		panic(mLTN)
+	}
+	if k > n {
+		panic(kGTN)
+	}
+	if n == 0 {
+		return
+	}
+
+	// Initialize columns 0:n-k to columns of the unit matrix.
+	for j := 0; j < n-k; j++ {
+		for l := 0; l < m; l++ {
+			a[l*lda+j] = 0
+		}
+		a[(m-n+j)*lda+j] = 1
+	}
+
+	bi := blas64.Implementation()
+	for i := 0; i < k; i++ {
+		ii := n - k + i
+
+		// Apply H_i to A[0:m-k+i, 0:n-k+i] from the left.
+		a[(m-n+ii)*lda+ii] = 1
+		impl.Dlarf(blas.Left, m-n+ii+1, ii, a[ii:], lda, tau[i], a, lda, work)
+		bi.Dscal(m-n+ii, -tau[i], a[ii:], lda)
+		a[(m-n+ii)*lda+ii] = 1 - tau[i]
+
+		// Set A[m-k+i+1:m, n-k+i] to zero.
+		for l := m - n + ii + 1; l < m; l++ {
+			a[l*lda+ii] = 0
+		}
+	}
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go
new file mode 100644
index 00000000..d7525017
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go
@@ -0,0 +1,65 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"gonum.org/v1/gonum/blas"
+	"gonum.org/v1/gonum/blas/blas64"
+)
+
+// Dorg2r generates an m×n matrix Q with orthonormal columns defined by the
+// product of elementary reflectors as computed by Dgeqrf.
+//	Q = H_0 * H_1 * ... * H_{k-1}
+// len(tau) >= k, 0 <= k <= n, 0 <= n <= m, len(work) >= n.
+// Dorg2r will panic if these conditions are not met.
+//
+// Dorg2r is an internal routine. It is exported for testing purposes.
+func (impl Implementation) Dorg2r(m, n, k int, a []float64, lda int, tau []float64, work []float64) {
+	checkMatrix(m, n, a, lda)
+	if len(tau) < k {
+		panic(badTau)
+	}
+	if len(work) < n {
+		panic(badWork)
+	}
+	if k > n {
+		panic(kGTN)
+	}
+	if n > m {
+		panic(mLTN)
+	}
+	if n == 0 {
+		return
+	}
+	bi := blas64.Implementation()
+	// Initialize columns k:n to columns of the unit matrix.
+	for l := 0; l < m; l++ {
+		for j := k; j < n; j++ {
+			a[l*lda+j] = 0
+		}
+	}
+	for j := k; j < n; j++ {
+		a[j*lda+j] = 1
+	}
+	for i := k - 1; i >= 0; i-- {
+		for i := range work {
+			work[i] = 0
+		}
+		if i < n-1 {
+			a[i*lda+i] = 1
+			impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tau[i], a[i*lda+i+1:], lda, work)
+		}
+		if i < m-1 {
+			bi.Dscal(m-i-1, -tau[i], a[(i+1)*lda+i:], lda)
+		}
+		a[i*lda+i] = 1 - tau[i]
+		for l := 0; l < i; l++ {
+			a[l*lda+i] = 0
+		}
+	}
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go
new file mode 100644
index 00000000..c3c4c90a
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go
@@ -0,0 +1,124 @@
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import "gonum.org/v1/gonum/lapack"
+
+// Dorgbr generates one of the matrices Q or P^T computed by Dgebrd when
+// reducing a matrix to bidiagonal form. See Dgebd2 for the description of
+// Q and P^T.
+//
+// If vect == lapack.ApplyQ, then a is assumed to have been an m×k matrix and
+// Q is of order m. If m >= k, then Dorgbr returns the first n columns of Q
+// where m >= n >= k. If m < k, then Dorgbr returns Q as an m×m matrix.
+//
+// If vect == lapack.ApplyP, then A is assumed to have been a k×n matrix, and
+// P^T is of order n. If k < n, then Dorgbr returns the first m rows of P^T,
+// where n >= m >= k. If k >= n, then Dorgbr returns P^T as an n×n matrix.
+//
+// Dorgbr is an internal routine. It is exported for testing purposes.
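+//
+// A hedged sketch of the usual two-call workspace pattern (the variable names
+// are illustrative):
+//	work := make([]float64, 1)
+//	impl.Dorgbr(lapack.ApplyQ, m, n, k, a, lda, tau, work, -1) // size query
+//	work = make([]float64, int(work[0]))
+//	impl.Dorgbr(lapack.ApplyQ, m, n, k, a, lda, tau, work, len(work))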
+func (impl Implementation) Dorgbr(vect lapack.DecompUpdate, m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + mn := min(m, n) + wantq := vect == lapack.ApplyQ + if wantq { + if m < n || n < min(m, k) || m < min(m, k) { + panic(badDims) + } + } else { + if n < m || m < min(n, k) || n < min(n, k) { + panic(badDims) + } + } + if wantq { + if m >= k { + checkMatrix(m, k, a, lda) + } else { + checkMatrix(m, m, a, lda) + } + } else { + if n >= k { + checkMatrix(k, n, a, lda) + } else { + checkMatrix(n, n, a, lda) + } + } + work[0] = 1 + if wantq { + if m >= k { + impl.Dorgqr(m, n, k, a, lda, tau, work, -1) + } else if m > 1 { + impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, -1) + } + } else { + if k < n { + impl.Dorglq(m, n, k, a, lda, tau, work, -1) + } else if n > 1 { + impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, -1) + } + } + lworkopt := int(work[0]) + lworkopt = max(lworkopt, mn) + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + if len(work) < lwork { + panic(badWork) + } + if lwork < mn { + panic(badWork) + } + if m == 0 || n == 0 { + work[0] = 1 + return + } + if wantq { + // Form Q, determined by a call to Dgebrd to reduce an m×k matrix. + if m >= k { + impl.Dorgqr(m, n, k, a, lda, tau, work, lwork) + } else { + // Shift the vectors which define the elementary reflectors one + // column to the right, and set the first row and column of Q to + // those of the unit matrix. + for j := m - 1; j >= 1; j-- { + a[j] = 0 + for i := j + 1; i < m; i++ { + a[i*lda+j] = a[i*lda+j-1] + } + } + a[0] = 1 + for i := 1; i < m; i++ { + a[i*lda] = 0 + } + if m > 1 { + // Form Q[1:m-1, 1:m-1] + impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, lwork) + } + } + } else { + // Form P^T, determined by a call to Dgebrd to reduce a k×n matrix. + if k < n { + impl.Dorglq(m, n, k, a, lda, tau, work, lwork) + } else { + // Shift the vectors which define the elementary reflectors one + // row downward, and set the first row and column of P^T to + // those of the unit matrix. + a[0] = 1 + for i := 1; i < n; i++ { + a[i*lda] = 0 + } + for j := 1; j < n; j++ { + for i := j - 1; i >= 1; i-- { + a[i*lda+j] = a[(i-1)*lda+j] + } + a[j] = 0 + } + if n > 1 { + impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) + } + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go new file mode 100644 index 00000000..b7ea7b2c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go @@ -0,0 +1,93 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Dorghr generates an n×n orthogonal matrix Q which is defined as the product +// of ihi-ilo elementary reflectors: +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// +// a and lda represent an n×n matrix that contains the elementary reflectors, as +// returned by Dgehrd. On return, a is overwritten by the n×n orthogonal matrix +// Q. Q will be equal to the identity matrix except in the submatrix +// Q[ilo+1:ihi+1,ilo+1:ihi+1]. +// +// ilo and ihi must have the same values as in the previous call of Dgehrd. It +// must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo = 0, ihi = -1, if n == 0. +// +// tau contains the scalar factors of the elementary reflectors, as returned by +// Dgehrd. 
tau must have length n-1. +// +// work must have length at least max(1,lwork) and lwork must be at least +// ihi-ilo. For optimum performance lwork must be at least (ihi-ilo)*nb where nb +// is the optimal blocksize. On return, work[0] will contain the optimal value +// of lwork. +// +// If lwork == -1, instead of performing Dorghr, only the optimal value of lwork +// will be stored into work[0]. +// +// If any requirement on input sizes is not met, Dorghr will panic. +// +// Dorghr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorghr(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { + checkMatrix(n, n, a, lda) + nh := ihi - ilo + switch { + case ilo < 0 || max(1, n) <= ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lwork < max(1, nh) && lwork != -1: + panic(badWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + lwkopt := max(1, nh) * impl.Ilaenv(1, "DORGQR", " ", nh, nh, nh, -1) + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + // Shift the vectors which define the elementary reflectors one column + // to the right. + for i := ilo + 2; i < ihi+1; i++ { + copy(a[i*lda+ilo+1:i*lda+i], a[i*lda+ilo:i*lda+i-1]) + } + // Set the first ilo+1 and the last n-ihi-1 rows and columns to those of + // the identity matrix. + for i := 0; i < ilo+1; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + a[i*lda+i] = 1 + } + for i := ilo + 1; i < ihi+1; i++ { + for j := 0; j <= ilo; j++ { + a[i*lda+j] = 0 + } + for j := ihi + 1; j < n; j++ { + a[i*lda+j] = 0 + } + } + for i := ihi + 1; i < n; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + a[i*lda+i] = 1 + } + if nh > 0 { + // Generate Q[ilo+1:ihi+1,ilo+1:ihi+1]. + impl.Dorgqr(nh, nh, nh, a[(ilo+1)*lda+ilo+1:], lda, tau[ilo:ihi], work, lwork) + } + work[0] = float64(lwkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go new file mode 100644 index 00000000..06303812 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go @@ -0,0 +1,63 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dorgl2 generates an m×n matrix Q with orthonormal rows defined by the +// first m rows of a product of elementary reflectors as computed by Dgelqf. +// Q = H_0 * H_1 * ... * H_{k-1} +// len(tau) >= k, 0 <= k <= m, 0 <= m <= n, len(work) >= m. +// Dorgl2 will panic if these conditions are not met. +// +// Dorgl2 is an internal routine. It is exported for testing purposes.
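+// +// A minimal usage sketch (illustrative only; m, n, k, a, lda and tau are +// assumed to hold the output of a prior Dgelqf call on an m×n matrix, m <= n): +//  var impl Implementation +//  work := make([]float64, m) +//  impl.Dorgl2(m, n, k, a, lda, tau, work) // a now holds the explicit rows of Q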
+func (impl Implementation) Dorgl2(m, n, k int, a []float64, lda int, tau, work []float64) { + checkMatrix(m, n, a, lda) + if len(tau) < k { + panic(badTau) + } + if k > m { + panic(kGTM) + } + if m > n { + panic(nLTM) + } + if len(work) < m { + panic(badWork) + } + if m == 0 { + return + } + bi := blas64.Implementation() + if k < m { + for i := k; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + } + for j := k; j < m; j++ { + a[j*lda+j] = 1 + } + } + for i := k - 1; i >= 0; i-- { + if i < n-1 { + if i < m-1 { + a[i*lda+i] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tau[i], a[(i+1)*lda+i:], lda, work) + } + bi.Dscal(n-i-1, -tau[i], a[i*lda+i+1:], 1) + } + a[i*lda+i] = 1 - tau[i] + for l := 0; l < i; l++ { + a[i*lda+l] = 0 + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go new file mode 100644 index 00000000..4f45a6a3 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go @@ -0,0 +1,117 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorglq generates an m×n matrix Q with orthonormal rows defined by the +// product of elementary reflectors as computed by Dgelqf. +// Q = H_0 * H_1 * ... * H_{k-1} +// Dorglq is the blocked version of Dorgl2 that makes greater use of level-3 BLAS +// routines. +// +// len(tau) >= k, 0 <= k <= m, and 0 <= m <= n. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= m, and the amount of blocking is limited by the usable length. +// If lwork == -1, instead of computing Dorglq the optimal work length is stored +// into work[0]. +// +// Dorglq will panic if the conditions on input values are not met. +// +// Dorglq is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorglq(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + nb := impl.Ilaenv(1, "DORGLQ", " ", m, n, k, -1) + // work is treated as an m×nb matrix + if lwork == -1 { + work[0] = float64(max(1, m) * nb) + return + } + checkMatrix(m, n, a, lda) + if k < 0 { + panic(kLT0) + } + if k > m { + panic(kGTM) + } + if m > n { + panic(nLTM) + } + if len(tau) < k { + panic(badTau) + } + if len(work) < lwork { + panic(shortWork) + } + if lwork < m { + panic(badWork) + } + if m == 0 { + return + } + nbmin := 2 // Minimum number of blocks + var nx int // Minimum number of rows + iws := m // Length of work needed + var ldwork int + if nb > 1 && nb < k { + nx = max(0, impl.Ilaenv(3, "DORGLQ", " ", m, n, k, -1)) + if nx < k { + ldwork = nb + iws = m * ldwork + if lwork < iws { + nb = lwork / m + ldwork = nb + nbmin = max(2, impl.Ilaenv(2, "DORGLQ", " ", m, n, k, -1)) + } + } + } + var ki, kk int + if nb >= nbmin && nb < k && nx < k { + // The first kk rows are handled by the blocked method. + // Note: lapack has nx here, but this means the last nx rows are handled + // serially which could be quite different than nb. + ki = ((k - nb - 1) / nb) * nb + kk = min(k, ki+nb) + for i := kk; i < m; i++ { + for j := 0; j < kk; j++ { + a[i*lda+j] = 0 + } + } + } + if kk < m { + // Perform the operation on rows kk to the end.
+ impl.Dorgl2(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) + } + if kk == 0 { + return + } + // Perform the operation on column-blocks + for i := ki; i >= 0; i -= nb { + ib := min(nb, k-i) + if i+ib < m { + impl.Dlarft(lapack.Forward, lapack.RowWise, + n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + + impl.Dlarfb(blas.Right, blas.Trans, lapack.Forward, lapack.RowWise, + m-i-ib, n-i, ib, + a[i*lda+i:], lda, + work, ldwork, + a[(i+ib)*lda+i:], lda, + work[ib*ldwork:], ldwork) + } + impl.Dorgl2(ib, n-i, ib, a[i*lda+i:], lda, tau[i:], work) + for l := i; l < i+ib; l++ { + for j := 0; j < i; j++ { + a[l*lda+j] = 0 + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go new file mode 100644 index 00000000..35967d72 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go @@ -0,0 +1,130 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorgql generates the m×n matrix Q with orthonormal columns defined as the +// last n columns of a product of k elementary reflectors of order m +// Q = H_{k-1} * ... * H_1 * H_0. +// +// It must hold that +// 0 <= k <= n <= m, +// and Dorgql will panic otherwise. +// +// On entry, the (n-k+i)-th column of A must contain the vector which defines +// the elementary reflector H_i, for i=0,...,k-1, and tau[i] must contain its +// scalar factor. On return, a contains the m×n matrix Q. +// +// tau must have length at least k, and Dorgql will panic otherwise. +// +// work must have length at least max(1,lwork), and lwork must be at least +// max(1,n), otherwise Dorgql will panic. For optimum performance lwork must +// be a sufficiently large multiple of n. +// +// If lwork == -1, instead of computing Dorgql the optimal work length is stored +// into work[0]. +// +// Dorgql is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgql(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case n < 0: + panic(nLT0) + case m < n: + panic(mLTN) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lwork < max(1, n) && lwork != -1: + panic(badWork) + case len(work) < lwork: + panic(shortWork) + } + if lwork != -1 { + checkMatrix(m, n, a, lda) + if len(tau) < k { + panic(badTau) + } + } + + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DORGQL", " ", m, n, k, -1) + if lwork == -1 { + work[0] = float64(n * nb) + return + } + + nbmin := 2 + var nx, ldwork int + iws := n + if nb > 1 && nb < k { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(3, "DORGQL", " ", m, n, k, -1)) + if nx < k { + // Determine if workspace is large enough for blocked code. + iws = n * nb + if lwork < iws { + // Not enough workspace to use optimal nb: reduce nb and determine + // the minimum value of nb. + nb = lwork / n + nbmin = max(2, impl.Ilaenv(2, "DORGQL", " ", m, n, k, -1)) + } + ldwork = nb + } + } + + var kk int + if nb >= nbmin && nb < k && nx < k { + // Use blocked code after the first block. The last kk columns are handled + // by the block method. + kk = min(k, ((k-nx+nb-1)/nb)*nb) + + // Set A(m-kk:m, 0:n-kk) to zero. 
+ for i := m - kk; i < m; i++ { + for j := 0; j < n-kk; j++ { + a[i*lda+j] = 0 + } + } + } + + // Use unblocked code for the first or only block. + impl.Dorg2l(m-kk, n-kk, k-kk, a, lda, tau, work) + if kk > 0 { + // Use blocked code. + for i := k - kk; i < k; i += nb { + ib := min(nb, k-i) + if n-k+i > 0 { + // Form the triangular factor of the block reflector + // H = H_{i+ib-1} * ... * H_{i+1} * H_i. + impl.Dlarft(lapack.Backward, lapack.ColumnWise, m-k+i+ib, ib, + a[n-k+i:], lda, tau[i:], work, ldwork) + + // Apply H to A[0:m-k+i+ib, 0:n-k+i] from the left. + impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Backward, lapack.ColumnWise, + m-k+i+ib, n-k+i, ib, a[n-k+i:], lda, work, ldwork, + a, lda, work[ib*ldwork:], ldwork) + } + + // Apply H to rows 0:m-k+i+ib of current block. + impl.Dorg2l(m-k+i+ib, ib, ib, a[n-k+i:], lda, tau[i:], work) + + // Set rows m-k+i+ib:m of current block to zero. + for j := n - k + i; j < n-k+i+ib; j++ { + for l := m - k + i + ib; l < m; l++ { + a[l*lda+j] = 0 + } + } + } + } + work[0] = float64(iws) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go new file mode 100644 index 00000000..6b8fb742 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go @@ -0,0 +1,120 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorgqr generates an m×n matrix Q with orthonormal columns defined by the +// product of elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1} +// as computed by Dgeqrf. +// Dorgqr is the blocked version of Dorg2r that makes greater use of level-3 BLAS +// routines. +// +// The length of tau must be at least k, and the length of work must be at least n. +// It also must be that 0 <= k <= n and 0 <= n <= m. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= n, and the amount of blocking is limited by the usable +// length. If lwork == -1, instead of computing Dorgqr the optimal work length +// is stored into work[0]. +// +// Dorgqr will panic if the conditions on input values are not met. +// +// Dorgqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgqr(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + nb := impl.Ilaenv(1, "DORGQR", " ", m, n, k, -1) + // work is treated as an n×nb matrix + if lwork == -1 { + work[0] = float64(max(1, n) * nb) + return + } + checkMatrix(m, n, a, lda) + if k < 0 { + panic(kLT0) + } + if k > n { + panic(kGTN) + } + if n > m { + panic(mLTN) + } + if len(tau) < k { + panic(badTau) + } + if len(work) < lwork { + panic(shortWork) + } + if lwork < n { + panic(badWork) + } + if n == 0 { + return + } + nbmin := 2 // Minimum number of blocks + var nx int // Minimum number of rows + iws := n // Length of work needed + var ldwork int + if nb > 1 && nb < k { + nx = max(0, impl.Ilaenv(3, "DORGQR", " ", m, n, k, -1)) + if nx < k { + ldwork = nb + iws = n * ldwork + if lwork < iws { + nb = lwork / n + ldwork = nb + nbmin = max(2, impl.Ilaenv(2, "DORGQR", " ", m, n, k, -1)) + } + } + } + var ki, kk int + if nb >= nbmin && nb < k && nx < k { + // The first kk columns are handled by the blocked method. 
+ // Note: lapack has nx here, but this means the last nx rows are handled + // serially which could be quite different than nb. + ki = ((k - nb - 1) / nb) * nb + kk = min(k, ki+nb) + for j := kk; j < n; j++ { + for i := 0; i < kk; i++ { + a[i*lda+j] = 0 + } + } + } + if kk < n { + // Perform the operation on columns kk to the end. + impl.Dorg2r(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) + } + if kk == 0 { + return + } + // Perform the operation on column-blocks + for i := ki; i >= 0; i -= nb { + ib := min(nb, k-i) + if i+ib < n { + impl.Dlarft(lapack.Forward, lapack.ColumnWise, + m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + + impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Forward, lapack.ColumnWise, + m-i, n-i-ib, ib, + a[i*lda+i:], lda, + work, ldwork, + a[i*lda+i+ib:], lda, + work[ib*ldwork:], ldwork) + } + impl.Dorg2r(m-i, ib, ib, a[i*lda+i:], lda, tau[i:], work) + // Set rows 0:i-1 of current block to zero + for j := i; j < i+ib; j++ { + for l := 0; l < i; l++ { + a[l*lda+j] = 0 + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go new file mode 100644 index 00000000..6984ff55 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go @@ -0,0 +1,99 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorgtr generates a real orthogonal matrix Q which is defined as the product +// of n-1 elementary reflectors of order n as returned by Dsytrd. +// +// The construction of Q depends on the value of uplo: +// Q = H_{n-1} * ... * H_1 * H_0 if uplo == blas.Upper +// Q = H_0 * H_1 * ... * H_{n-1} if uplo == blas.Lower +// where H_i is constructed from the elementary reflectors as computed by Dsytrd. +// See the documentation for Dsytrd for more information. +// +// tau must have length at least n-1, and Dorgtr will panic otherwise. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= max(1,n-1), and Dorgtr will panic otherwise. The amount of blocking +// is limited by the usable length. +// If lwork == -1, instead of computing Dorgtr the optimal work length is stored +// into work[0]. +// +// Dorgtr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgtr(uplo blas.Uplo, n int, a []float64, lda int, tau, work []float64, lwork int) { + checkMatrix(n, n, a, lda) + if len(tau) < n-1 { + panic(badTau) + } + if len(work) < lwork { + panic(badWork) + } + if lwork < n-1 && lwork != -1 { + panic(badWork) + } + upper := uplo == blas.Upper + if !upper && uplo != blas.Lower { + panic(badUplo) + } + + if n == 0 { + work[0] = 1 + return + } + + var nb int + if upper { + nb = impl.Ilaenv(1, "DORGQL", " ", n-1, n-1, n-1, -1) + } else { + nb = impl.Ilaenv(1, "DORGQR", " ", n-1, n-1, n-1, -1) + } + lworkopt := max(1, n-1) * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + if upper { + // Q was determined by a call to Dsytrd with uplo == blas.Upper. + // Shift the vectors which define the elementary reflectors one column + // to the left, and set the last row and column of Q to those of the unit + // matrix.
+ for j := 0; j < n-1; j++ { + for i := 0; i < j; i++ { + a[i*lda+j] = a[i*lda+j+1] + } + a[(n-1)*lda+j] = 0 + } + for i := 0; i < n-1; i++ { + a[i*lda+n-1] = 0 + } + a[(n-1)*lda+n-1] = 1 + + // Generate Q[0:n-1, 0:n-1]. + impl.Dorgql(n-1, n-1, n-1, a, lda, tau, work, lwork) + } else { + // Q was determined by a call to Dsytrd with uplo == blas.Lower. + // Shift the vectors which define the elementary reflectors one column + // to the right, and set the first row and column of Q to those of the unit + // matrix. + for j := n - 1; j > 0; j-- { + a[j] = 0 + for i := j + 1; i < n; i++ { + a[i*lda+j] = a[i*lda+j-1] + } + } + a[0] = 1 + for i := 1; i < n; i++ { + a[i*lda] = 0 + } + if n > 1 { + // Generate Q[1:n, 1:n]. + impl.Dorgqr(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go new file mode 100644 index 00000000..e8fb1d4d --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go @@ -0,0 +1,88 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorm2r multiplies a general matrix C by an orthogonal matrix from a QR factorization +// determined by Dgeqrf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of size m×k, and if side == blas.Right +// a is of size n×k. +// +// tau contains the Householder factors and must have length at least k; this +// function will panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right; this function will panic otherwise. +// +// Dorm2r is an internal routine. It is exported for testing purposes.
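+// +// A minimal usage sketch (illustrative only; a, lda and tau are assumed to +// come from Dgeqrf on an m×k matrix, and c is an m×n matrix). This forms +// Q^T * C without building Q explicitly: +//  var impl Implementation +//  work := make([]float64, n) // side == blas.Left needs length n +//  impl.Dorm2r(blas.Left, blas.Trans, m, n, k, a, lda, tau, c, ldc, work)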
+func (impl Implementation) Dorm2r(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + if side != blas.Left && side != blas.Right { + panic(badSide) + } + if trans != blas.Trans && trans != blas.NoTrans { + panic(badTrans) + } + + left := side == blas.Left + notran := trans == blas.NoTrans + if left { + // Q is m x m + checkMatrix(m, k, a, lda) + if len(work) < n { + panic(badWork) + } + } else { + // Q is n x n + checkMatrix(n, k, a, lda) + if len(work) < m { + panic(badWork) + } + } + checkMatrix(m, n, c, ldc) + if m == 0 || n == 0 || k == 0 { + return + } + if len(tau) < k { + panic(badTau) + } + if left { + if notran { + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + return + } + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + return + } + if notran { + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + return + } + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go new file mode 100644 index 00000000..250d23be --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go @@ -0,0 +1,158 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormbr applies a multiplicative update to the matrix C based on a +// decomposition computed by Dgebrd. +// +// Dormbr overwrites the m×n matrix C with +// Q * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.NoTrans +// C * Q if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.NoTrans +// Q^T * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.Trans +// C * Q^T if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.Trans +// +// P * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.NoTrans +// C * P if vect == lapack.ApplyP, side == blas.Right, and trans == blas.NoTrans +// P^T * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.Trans +// C * P^T if vect == lapack.ApplyP, side == blas.Right, and trans == blas.Trans +// where P and Q are the orthogonal matrices determined by Dgebrd when reducing +// a matrix A to bidiagonal form: A = Q * B * P^T. See Dgebrd for the +// definitions of Q and P. +// +// If vect == lapack.ApplyQ, A is assumed to have been an nq×k matrix, while if +// vect == lapack.ApplyP, A is assumed to have been a k×nq matrix. nq = m if +// side == blas.Left, while nq = n if side == blas.Right. +// +// tau must have length min(nq,k), and Dormbr will panic otherwise. tau contains +// the elementary reflectors to construct Q or P depending on the value of +// vect. 
+// +// work must have length at least max(1,lwork), and lwork must be either -1 or +// at least max(1,n) if side == blas.Left, and at least max(1,m) if side == +// blas.Right. For optimum performance lwork should be at least n*nb if side == +// blas.Left, and at least m*nb if side == blas.Right, where nb is the optimal +// block size. On return, work[0] will contain the optimal value of lwork. +// +// If lwork == -1, the function only calculates the optimal value of lwork and +// returns it in work[0]. +// +// Dormbr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dormbr(vect lapack.DecompUpdate, side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + if side != blas.Left && side != blas.Right { + panic(badSide) + } + if trans != blas.NoTrans && trans != blas.Trans { + panic(badTrans) + } + if vect != lapack.ApplyP && vect != lapack.ApplyQ { + panic(badDecompUpdate) + } + nq := n + nw := m + if side == blas.Left { + nq = m + nw = n + } + if vect == lapack.ApplyQ { + checkMatrix(nq, min(nq, k), a, lda) + } else { + checkMatrix(min(nq, k), nq, a, lda) + } + if len(tau) < min(nq, k) { + panic(badTau) + } + checkMatrix(m, n, c, ldc) + if len(work) < lwork { + panic(shortWork) + } + if lwork < max(1, nw) && lwork != -1 { + panic(badWork) + } + + applyQ := vect == lapack.ApplyQ + left := side == blas.Left + var nb int + + // The current implementation does not use opts, but a future change may + // use these options so construct them. + var opts string + if side == blas.Left { + opts = "L" + } else { + opts = "R" + } + if trans == blas.Trans { + opts += "T" + } else { + opts += "N" + } + if applyQ { + if left { + nb = impl.Ilaenv(1, "DORMQR", opts, m-1, n, m-1, -1) + } else { + nb = impl.Ilaenv(1, "DORMQR", opts, m, n-1, n-1, -1) + } + } else { + if left { + nb = impl.Ilaenv(1, "DORMLQ", opts, m-1, n, m-1, -1) + } else { + nb = impl.Ilaenv(1, "DORMLQ", opts, m, n-1, n-1, -1) + } + } + lworkopt := max(1, nw) * nb + if lwork == -1 { + work[0] = float64(lworkopt) + } + if applyQ { + // Change the operation to get Q depending on the size of the initial + // matrix to Dgebrd. The size matters due to the storage location of + // the off-diagonal elements. + if nq >= k { + impl.Dormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork) + } else if nq > 1 { + mi := m + ni := n - 1 + i1 := 0 + i2 := 1 + if left { + mi = m - 1 + ni = n + i1 = 1 + i2 = 0 + } + impl.Dormqr(side, trans, mi, ni, nq-1, a[1*lda:], lda, tau[:nq-1], c[i1*ldc+i2:], ldc, work, lwork) + } + work[0] = float64(lworkopt) + return + } + transt := blas.Trans + if trans == blas.Trans { + transt = blas.NoTrans + } + // Change the operation to get P depending on the size of the initial + // matrix to Dgebrd. The size matters due to the storage location of + // the off-diagonal elements. 
+ if nq > k { + impl.Dormlq(side, transt, m, n, k, a, lda, tau, c, ldc, work, lwork) + } else if nq > 1 { + mi := m + ni := n - 1 + i1 := 0 + i2 := 1 + if left { + mi = m - 1 + ni = n + i1 = 1 + i2 = 0 + } + impl.Dormlq(side, transt, mi, ni, nq-1, a[1:], lda, tau, c[i1*ldc+i2:], ldc, work, lwork) + } + work[0] = float64(lworkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go new file mode 100644 index 00000000..f6cb1b26 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go @@ -0,0 +1,121 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dormhr multiplies an m×n general matrix C with an nq×nq orthogonal matrix Q +// Q * C, if side == blas.Left and trans == blas.NoTrans, +// Q^T * C, if side == blas.Left and trans == blas.Trans, +// C * Q, if side == blas.Right and trans == blas.NoTrans, +// C * Q^T, if side == blas.Right and trans == blas.Trans, +// where nq == m if side == blas.Left and nq == n if side == blas.Right. +// +// Q is defined implicitly as the product of ihi-ilo elementary reflectors, as +// returned by Dgehrd: +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Q is equal to the identity matrix except in the submatrix +// Q[ilo+1:ihi+1,ilo+1:ihi+1]. +// +// ilo and ihi must have the same values as in the previous call of Dgehrd. It +// must hold that +// 0 <= ilo <= ihi < m, if m > 0 and side == blas.Left, +// ilo = 0 and ihi = -1, if m = 0 and side == blas.Left, +// 0 <= ilo <= ihi < n, if n > 0 and side == blas.Right, +// ilo = 0 and ihi = -1, if n = 0 and side == blas.Right. +// +// a and lda represent an m×m matrix if side == blas.Left and an n×n matrix if +// side == blas.Right. The matrix contains vectors which define the elementary +// reflectors, as returned by Dgehrd. +// +// tau contains the scalar factors of the elementary reflectors, as returned by +// Dgehrd. tau must have length m-1 if side == blas.Left and n-1 if side == +// blas.Right. +// +// c and ldc represent the m×n matrix C. On return, c is overwritten by the +// product with Q. +// +// work must have length at least max(1,lwork), and lwork must be at least +// max(1,n), if side == blas.Left, and max(1,m), if side == blas.Right. For +// optimum performance lwork should be at least n*nb if side == blas.Left and +// m*nb if side == blas.Right, where nb is the optimal block size. On return, +// work[0] will contain the optimal value of lwork. +// +// If lwork == -1, instead of performing Dormhr, only the optimal value of lwork +// will be stored in work[0]. +// +// If any requirement on input sizes is not met, Dormhr will panic. +// +// Dormhr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dormhr(side blas.Side, trans blas.Transpose, m, n, ilo, ihi int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + var ( + nq int // The order of Q. + nw int // The minimum length of work. 
+ ) + switch side { + case blas.Left: + nq = m + nw = n + case blas.Right: + nq = n + nw = m + default: + panic(badSide) + } + switch { + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case ilo < 0 || max(1, nq) <= ilo: + panic(badIlo) + case ihi < min(ilo, nq-1) || nq <= ihi: + panic(badIhi) + case lwork < max(1, nw) && lwork != -1: + panic(badWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + if lwork != -1 { + checkMatrix(m, n, c, ldc) + checkMatrix(nq, nq, a, lda) + if len(tau) != nq-1 && nq > 0 { + panic(badTau) + } + } + + nh := ihi - ilo + var nb int + if side == blas.Left { + opts := "LN" + if trans == blas.Trans { + opts = "LT" + } + nb = impl.Ilaenv(1, "DORMQR", opts, nh, n, nh, -1) + } else { + opts := "RN" + if trans == blas.Trans { + opts = "RT" + } + nb = impl.Ilaenv(1, "DORMQR", opts, m, nh, nh, -1) + } + lwkopt := max(1, nw) * nb + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + if m == 0 || n == 0 || nh == 0 { + work[0] = 1 + return + } + if side == blas.Left { + impl.Dormqr(side, trans, nh, n, nh, a[(ilo+1)*lda+ilo:], lda, + tau[ilo:ihi], c[(ilo+1)*ldc:], ldc, work, lwork) + } else { + impl.Dormqr(side, trans, m, nh, nh, a[(ilo+1)*lda+ilo:], lda, + tau[ilo:ihi], c[ilo+1:], ldc, work, lwork) + } + work[0] = float64(lwkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go new file mode 100644 index 00000000..1c217b5b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go @@ -0,0 +1,83 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorml2 multiplies a general matrix C by an orthogonal matrix from an LQ factorization +// determined by Dgelqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of size k×m, and if side == blas.Right +// a is of size k×n. +// +// tau contains the Householder factors and must have length at least k; this +// function will panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right; this function will panic otherwise. +// +// Dorml2 is an internal routine. It is exported for testing purposes.
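+// +// A minimal usage sketch (illustrative only; a, lda and tau are assumed to +// come from Dgelqf on a k×m matrix, and c is an m×n matrix): +//  var impl Implementation +//  work := make([]float64, n) +//  impl.Dorml2(blas.Left, blas.NoTrans, m, n, k, a, lda, tau, c, ldc, work) // c = Q * c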
+func (impl Implementation) Dorml2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + if side != blas.Left && side != blas.Right { + panic(badSide) + } + if trans != blas.Trans && trans != blas.NoTrans { + panic(badTrans) + } + + left := side == blas.Left + notran := trans == blas.NoTrans + if left { + checkMatrix(k, m, a, lda) + if len(work) < n { + panic(badWork) + } + } else { + checkMatrix(k, n, a, lda) + if len(work) < m { + panic(badWork) + } + } + checkMatrix(m, n, c, ldc) + if m == 0 || n == 0 || k == 0 { + return + } + switch { + case left && notran: + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + + case left && !notran: + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + + case !left && notran: + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + + case !left && !notran: + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go new file mode 100644 index 00000000..d7a27643 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go @@ -0,0 +1,159 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormlq multiplies the matrix C by the orthogonal matrix Q defined by the +// slices a and tau. A and tau are as returned from Dgelqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, A is a matrix of size k×m, and if side == blas.Right +// A is of size k×n. This uses a blocked algorithm. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= n if side == blas.Left and lwork >= m if side == blas.Right, +// and this function will panic otherwise. +// Dormlq uses a block algorithm, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dormlq, +// the optimal work length will be stored into work[0]. +// +// tau contains the Householder scales and must have length at least k, and +// this function will panic otherwise.
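+// +// A minimal usage sketch (illustrative only; a, lda, tau and the m×n matrix c +// are assumed to exist as in Dorml2 above), using the usual workspace query: +//  var impl Implementation +//  work := make([]float64, 1) +//  impl.Dormlq(blas.Left, blas.NoTrans, m, n, k, a, lda, tau, c, ldc, work, -1) // query +//  work = make([]float64, int(work[0])) +//  impl.Dormlq(blas.Left, blas.NoTrans, m, n, k, a, lda, tau, c, ldc, work, len(work))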
+func (impl Implementation) Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + if side != blas.Left && side != blas.Right { + panic(badSide) + } + if trans != blas.Trans && trans != blas.NoTrans { + panic(badTrans) + } + left := side == blas.Left + if left { + checkMatrix(k, m, a, lda) + } else { + checkMatrix(k, n, a, lda) + } + checkMatrix(m, n, c, ldc) + if len(tau) < k { + panic(badTau) + } + if len(work) < lwork { + panic(shortWork) + } + nw := m + if left { + nw = n + } + if lwork < max(1, nw) && lwork != -1 { + panic(badWork) + } + + if m == 0 || n == 0 || k == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + tsize = nbmax * ldt + ) + opts := string(side) + string(trans) + nb := min(nbmax, impl.Ilaenv(1, "DORMLQ", opts, m, n, k, -1)) + lworkopt := max(1, nw)*nb + tsize + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + nbmin := 2 + if 1 < nb && nb < k { + iws := nw*nb + tsize + if lwork < iws { + nb = (lwork - tsize) / nw + nbmin = max(2, impl.Ilaenv(2, "DORMLQ", opts, m, n, k, -1)) + } + } + if nb < nbmin || k <= nb { + // Call unblocked code. + impl.Dorml2(side, trans, m, n, k, a, lda, tau, c, ldc, work) + work[0] = float64(lworkopt) + return + } + + t := work[:tsize] + wrk := work[tsize:] + ldwrk := nb + + notran := trans == blas.NoTrans + transt := blas.NoTrans + if notran { + transt = blas.Trans + } + + switch { + case left && notran: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, + a[i*lda+i:], lda, + t, ldt, + c[i*ldc:], ldc, + wrk, ldwrk) + } + + case left && !notran: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, + a[i*lda+i:], lda, + t, ldt, + c[i*ldc:], ldc, + wrk, ldwrk) + } + + case !left && notran: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, + a[i*lda+i:], lda, + t, ldt, + c[i:], ldc, + wrk, ldwrk) + } + + case !left && !notran: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, + a[i*lda+i:], lda, + t, ldt, + c[i:], ldc, + wrk, ldwrk) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go new file mode 100644 index 00000000..3fa9009f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go @@ -0,0 +1,167 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormqr multiplies an m×n matrix C by an orthogonal matrix Q as +// C = Q * C, if side == blas.Left and trans == blas.NoTrans, +// C = Q^T * C, if side == blas.Left and trans == blas.Trans, +// C = C * Q, if side == blas.Right and trans == blas.NoTrans, +// C = C * Q^T, if side == blas.Right and trans == blas.Trans, +// where Q is defined as the product of k elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1}. +// +// If side == blas.Left, A is an m×k matrix and 0 <= k <= m. +// If side == blas.Right, A is an n×k matrix and 0 <= k <= n. +// The ith column of A contains the vector which defines the elementary +// reflector H_i and tau[i] contains its scalar factor. tau must have length k +// and Dormqr will panic otherwise. Dgeqrf returns A and tau in the required +// form. +// +// work must have length at least max(1,lwork), and lwork must be at least n if +// side == blas.Left and at least m if side == blas.Right, otherwise Dormqr will +// panic. +// +// work is temporary storage, and lwork specifies the usable memory length. +// Larger values of lwork will generally give better performance. On return, +// work[0] will contain the optimal value of lwork. +// +// If lwork is -1, instead of performing Dormqr, the optimal workspace size will +// be stored into work[0]. +func (impl Implementation) Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + var nq, nw int + switch side { + default: + panic(badSide) + case blas.Left: + nq = m + nw = n + case blas.Right: + nq = n + nw = m + } + switch { + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0 || n < 0: + panic(negDimension) + case k < 0 || nq < k: + panic("lapack: invalid value of k") + case len(work) < lwork: + panic(shortWork) + case lwork < max(1, nw) && lwork != -1: + panic(badWork) + } + if lwork != -1 { + checkMatrix(nq, k, a, lda) + checkMatrix(m, n, c, ldc) + if len(tau) != k { + panic(badTau) + } + } + + if m == 0 || n == 0 || k == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + tsize = nbmax * ldt + ) + opts := string(side) + string(trans) + nb := min(nbmax, impl.Ilaenv(1, "DORMQR", opts, m, n, k, -1)) + lworkopt := max(1, nw)*nb + tsize + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + nbmin := 2 + if 1 < nb && nb < k { + if lwork < nw*nb+tsize { + nb = (lwork - tsize) / nw + nbmin = max(2, impl.Ilaenv(2, "DORMQR", opts, m, n, k, -1)) + } + } + + if nb < nbmin || k <= nb { + // Call unblocked code.
+ impl.Dorm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work) + work[0] = float64(lworkopt) + return + } + + var ( + ldwork = nb + left = side == blas.Left + notran = trans == blas.NoTrans + ) + switch { + case left && notran: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i*ldc:], ldc, + work[tsize:], ldwork) + } + + case left && !notran: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i*ldc:], ldc, + work[tsize:], ldwork) + } + + case !left && notran: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i:], ldc, + work[tsize:], ldwork) + } + + case !left && !notran: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i:], ldc, + work[tsize:], ldwork) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go new file mode 100644 index 00000000..3a6b4330 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go @@ -0,0 +1,93 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dormr2 multiplies a general matrix C by an orthogonal matrix from an RQ factorization +// determined by Dgerqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of size k×m, and if side == blas.Right +// a is of size k×n. +// +// tau contains the Householder factors and must have length at least k; this +// function will panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right; this function will panic otherwise. +// +// Dormr2 is an internal routine. It is exported for testing purposes.
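+// +// A minimal usage sketch (illustrative only; a, lda and tau are assumed to +// come from Dgerqf on a k×m matrix, and c is an m×n matrix): +//  var impl Implementation +//  work := make([]float64, n) +//  impl.Dormr2(blas.Left, blas.NoTrans, m, n, k, a, lda, tau, c, ldc, work) // c = Q * c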
+func (impl Implementation) Dormr2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + if side != blas.Left && side != blas.Right { + panic(badSide) + } + if trans != blas.Trans && trans != blas.NoTrans { + panic(badTrans) + } + + left := side == blas.Left + notran := trans == blas.NoTrans + if left { + if k > m { + panic(kGTM) + } + checkMatrix(k, m, a, lda) + if len(work) < n { + panic(badWork) + } + } else { + if k > n { + panic(kGTN) + } + checkMatrix(k, n, a, lda) + if len(work) < m { + panic(badWork) + } + } + if len(tau) < k { + panic(badTau) + } + checkMatrix(m, n, c, ldc) + + if m == 0 || n == 0 || k == 0 { + return + } + if left { + if notran { + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+(m-k+i)] + a[i*lda+(m-k+i)] = 1 + impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(m-k+i)] = aii + } + return + } + for i := 0; i < k; i++ { + aii := a[i*lda+(m-k+i)] + a[i*lda+(m-k+i)] = 1 + impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(m-k+i)] = aii + } + return + } + if notran { + for i := 0; i < k; i++ { + aii := a[i*lda+(n-k+i)] + a[i*lda+(n-k+i)] = 1 + impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(n-k+i)] = aii + } + return + } + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+(n-k+i)] + a[i*lda+(n-k+i)] = 1 + impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(n-k+i)] = aii + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go new file mode 100644 index 00000000..0c60385b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go @@ -0,0 +1,97 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpbtf2 computes the Cholesky factorization of a symmetric positive definite +// banded matrix ab. The matrix ab is n×n with kd diagonal bands. The Cholesky +// factorization computed is +// A = U^T * U if ul == blas.Upper +// A = L * L^T if ul == blas.Lower +// ul also specifies the storage of ab. If ul == blas.Upper, then +// ab is stored as an upper-triangular banded matrix with kd super-diagonals, +// and if ul == blas.Lower, ab is stored as a lower-triangular banded matrix +// with kd sub-diagonals. On exit, the banded matrix U or L is stored in-place +// into ab depending on the value of ul. Dpbtf2 returns whether the factorization +// was successfully completed. +// +// The band storage scheme is illustrated below when n = 6, and kd = 2. +// The resulting Cholesky decomposition is stored in the same elements as the +// input band matrix (a11 becomes u11 or l11, etc.). +// +// ul = blas.Upper +// a11 a12 a13 +// a22 a23 a24 +// a33 a34 a35 +// a44 a45 a46 +// a55 a56 * +// a66 * * +// +// ul = blas.Lower +// * * a11 +// * a21 a22 +// a31 a32 a33 +// a42 a43 a44 +// a53 a54 a55 +// a64 a65 a66 +// +// Dpbtf2 is the unblocked version of the algorithm, see Dpbtrf for the blocked +// version. +// +// Dpbtf2 is an internal routine, exported for testing purposes.
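+// +// A minimal usage sketch (illustrative only; n, kd, ab and ldab are assumed +// to describe a band matrix stored as in the blas.Upper diagram above): +//  var impl Implementation +//  if ok := impl.Dpbtf2(blas.Upper, n, kd, ab, ldab); !ok { +//      // ab is not positive definite and the factorization failed. +//  }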
+func (Implementation) Dpbtf2(ul blas.Uplo, n, kd int, ab []float64, ldab int) (ok bool) { + if ul != blas.Upper && ul != blas.Lower { + panic(badUplo) + } + checkSymBanded(ab, n, kd, ldab) + if n == 0 { + return + } + bi := blas64.Implementation() + kld := max(1, ldab-1) + if ul == blas.Upper { + for j := 0; j < n; j++ { + // Compute U(j,j) and test for non-positive-definiteness. + ajj := ab[j*ldab] + if ajj <= 0 { + return false + } + ajj = math.Sqrt(ajj) + ab[j*ldab] = ajj + // Compute elements j+1:j+kn of row j and update the trailing submatrix + // within the band. + kn := min(kd, n-j-1) + if kn > 0 { + bi.Dscal(kn, 1/ajj, ab[j*ldab+1:], 1) + bi.Dsyr(blas.Upper, kn, -1, ab[j*ldab+1:], 1, ab[(j+1)*ldab:], kld) + } + } + return true + } + for j := 0; j < n; j++ { + // Compute L(j,j) and test for non-positive-definiteness. + ajj := ab[j*ldab+kd] + if ajj <= 0 { + return false + } + ajj = math.Sqrt(ajj) + ab[j*ldab+kd] = ajj + + // Compute elements j+1:j+kn of column j and update the trailing submatrix + // within the band. + kn := min(kd, n-j-1) + if kn > 0 { + bi.Dscal(kn, 1/ajj, ab[(j+1)*ldab+kd-1:], kld) + bi.Dsyr(blas.Lower, kn, -1, ab[(j+1)*ldab+kd-1:], kld, ab[(j+1)*ldab+kd:], kld) + } + } + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go new file mode 100644 index 00000000..98d6c02b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go @@ -0,0 +1,76 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpocon estimates the reciprocal of the condition number of a positive-definite +// matrix A given the Cholesky decomposition of A. The condition number computed +// is based on the 1-norm; for a symmetric matrix this is equal to the ∞-norm. +// +// anorm is the 1-norm (equivalently, the ∞-norm) of the original matrix A. +// +// work is a temporary data slice of length at least 3*n and Dpocon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dpocon will panic otherwise.
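+// +// A minimal usage sketch (illustrative only; a is assumed to already hold the +// Cholesky factor from Dpotrf, and anorm the 1-norm of the original matrix, +// computed before factorizing): +//  var impl Implementation +//  work := make([]float64, 3*n) +//  iwork := make([]int, n) +//  rcond := impl.Dpocon(blas.Upper, n, a, lda, anorm, work, iwork) +//  // A small rcond indicates an ill-conditioned matrix.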
+func (impl Implementation) Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { + checkMatrix(n, n, a, lda) + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if len(work) < 3*n { + panic(badWork) + } + if len(iwork) < n { + panic(badWork) + } + var rcond float64 + if n == 0 { + return 1 + } + if anorm == 0 { + return rcond + } + + bi := blas64.Implementation() + var ainvnm float64 + smlnum := dlamchS + upper := uplo == blas.Upper + var kase int + var normin bool + isave := new([3]int) + var sl, su float64 + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / ainvnm) / anorm + } + return rcond + } + if upper { + sl = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + normin = true + su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + } else { + sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + normin = true + su = impl.Dlatrs(blas.Lower, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + } + scale := sl * su + if scale != 1 { + ix := bi.Idamax(n, work, 1) + if scale == 0 || scale < math.Abs(work[ix])*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go new file mode 100644 index 00000000..3d1cfb68 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go @@ -0,0 +1,72 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotf2 computes the Cholesky decomposition of the symmetric positive definite +// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, +// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T +// is computed and stored in-place into a. If a is not positive definite, false +// is returned. This is the unblocked version of the algorithm. +// +// Dpotf2 is an internal routine. It is exported for testing purposes. 
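+// +// A minimal usage sketch (illustrative only; a is assumed to hold an n×n +// symmetric positive definite matrix with its upper triangle stored): +//  var impl Implementation +//  if ok := impl.Dpotf2(blas.Upper, n, a, lda); ok { +//      // The upper triangle of a now holds U with A = U^T * U. +//  }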
+func (Implementation) Dpotf2(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { + if ul != blas.Upper && ul != blas.Lower { + panic(badUplo) + } + checkMatrix(n, n, a, lda) + + if n == 0 { + return true + } + + bi := blas64.Implementation() + if ul == blas.Upper { + for j := 0; j < n; j++ { + ajj := a[j*lda+j] + if j != 0 { + ajj -= bi.Ddot(j, a[j:], lda, a[j:], lda) + } + if ajj <= 0 || math.IsNaN(ajj) { + a[j*lda+j] = ajj + return false + } + ajj = math.Sqrt(ajj) + a[j*lda+j] = ajj + if j < n-1 { + bi.Dgemv(blas.Trans, j, n-j-1, + -1, a[j+1:], lda, a[j:], lda, + 1, a[j*lda+j+1:], 1) + bi.Dscal(n-j-1, 1/ajj, a[j*lda+j+1:], 1) + } + } + return true + } + for j := 0; j < n; j++ { + ajj := a[j*lda+j] + if j != 0 { + ajj -= bi.Ddot(j, a[j*lda:], 1, a[j*lda:], 1) + } + if ajj <= 0 || math.IsNaN(ajj) { + a[j*lda+j] = ajj + return false + } + ajj = math.Sqrt(ajj) + a[j*lda+j] = ajj + if j < n-1 { + bi.Dgemv(blas.NoTrans, n-j-1, j, + -1, a[(j+1)*lda:], lda, a[j*lda:], 1, + 1, a[(j+1)*lda+j:], lda) + bi.Dscal(n-j-1, 1/ajj, a[(j+1)*lda+j:], lda) + } + } + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go new file mode 100644 index 00000000..0ff3afcc --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go @@ -0,0 +1,72 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotrf computes the Cholesky decomposition of the symmetric positive definite +// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, +// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T +// is computed and stored in-place into a. If a is not positive definite, false +// is returned. This is the blocked version of the algorithm. 
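+// +// A minimal usage sketch (illustrative only; a is assumed to hold an n×n +// symmetric positive definite matrix with its lower triangle stored): +//  var impl Implementation +//  if ok := impl.Dpotrf(blas.Lower, n, a, lda); ok { +//      // The lower triangle of a now holds L with A = L * L^T. +//  }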
+func (impl Implementation) Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { + if ul != blas.Upper && ul != blas.Lower { + panic(badUplo) + } + checkMatrix(n, n, a, lda) + + if n == 0 { + return true + } + + nb := impl.Ilaenv(1, "DPOTRF", string(ul), n, -1, -1, -1) + if nb <= 1 || n <= nb { + return impl.Dpotf2(ul, n, a, lda) + } + bi := blas64.Implementation() + if ul == blas.Upper { + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dsyrk(blas.Upper, blas.Trans, jb, j, + -1, a[j:], lda, + 1, a[j*lda+j:], lda) + ok = impl.Dpotf2(blas.Upper, jb, a[j*lda+j:], lda) + if !ok { + return ok + } + if j+jb < n { + bi.Dgemm(blas.Trans, blas.NoTrans, jb, n-j-jb, j, + -1, a[j:], lda, a[j+jb:], lda, + 1, a[j*lda+j+jb:], lda) + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, jb, n-j-jb, + 1, a[j*lda+j:], lda, + a[j*lda+j+jb:], lda) + } + } + return true + } + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dsyrk(blas.Lower, blas.NoTrans, jb, j, + -1, a[j*lda:], lda, + 1, a[j*lda+j:], lda) + ok := impl.Dpotf2(blas.Lower, jb, a[j*lda+j:], lda) + if !ok { + return ok + } + if j+jb < n { + bi.Dgemm(blas.NoTrans, blas.Trans, n-j-jb, jb, j, + -1, a[(j+jb)*lda:], lda, a[j*lda:], lda, + 1, a[(j+jb)*lda+j:], lda) + bi.Dtrsm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, n-j-jb, jb, + 1, a[j*lda+j:], lda, + a[(j+jb)*lda+j:], lda) + } + } + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go new file mode 100644 index 00000000..302c3230 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go @@ -0,0 +1,47 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Drscl multiplies the vector x by 1/a being careful to avoid overflow or +// underflow where possible. +// +// Drscl is an internal routine. It is exported for testing purposes. +func (impl Implementation) Drscl(n int, a float64, x []float64, incX int) { + checkVector(n, x, incX) + bi := blas64.Implementation() + cden := a + cnum := 1.0 + smlnum := dlamchS + bignum := 1 / smlnum + for { + cden1 := cden * smlnum + cnum1 := cnum / bignum + var mul float64 + var done bool + switch { + case cnum != 0 && math.Abs(cden1) > math.Abs(cnum): + mul = smlnum + done = false + cden = cden1 + case math.Abs(cnum1) > math.Abs(cden): + mul = bignum + done = false + cnum = cnum1 + default: + mul = cnum / cden + done = true + } + bi.Dscal(n, mul, x, incX) + if done { + break + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go new file mode 100644 index 00000000..0e1125e5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go @@ -0,0 +1,373 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dsteqr computes the eigenvalues and optionally the eigenvectors of a symmetric +// tridiagonal matrix using the implicit QL or QR method. 
The eigenvectors of a +// full or band symmetric matrix can also be found if Dsytrd, Dsptrd, or Dsbtrd +// have been used to reduce this matrix to tridiagonal form. +// +// d, on entry, contains the diagonal elements of the tridiagonal matrix. On exit, +// d contains the eigenvalues in ascending order. d must have length n and +// Dsteqr will panic otherwise. +// +// e, on entry, contains the off-diagonal elements of the tridiagonal matrix, +// and is overwritten during the call to Dsteqr. e must have length n-1 and +// Dsteqr will panic otherwise. +// +// z, on entry, contains the n×n orthogonal matrix used in the reduction to +// tridiagonal form if compz == lapack.OriginalEV. On exit, if +// compz == lapack.OriginalEV, z contains the orthonormal eigenvectors of the +// original symmetric matrix, and if compz == lapack.TridiagEV, z contains the +// orthonormal eigenvectors of the symmetric tridiagonal matrix. z is not used +// if compz == lapack.None. +// +// work must have length at least max(1, 2*n-2) if the eigenvectors are computed, +// and Dsteqr will panic otherwise. +// +// Dsteqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsteqr(compz lapack.EVComp, n int, d, e, z []float64, ldz int, work []float64) (ok bool) { + if n < 0 { + panic(nLT0) + } + if len(d) < n { + panic(badD) + } + if len(e) < n-1 { + panic(badE) + } + if compz != lapack.None && compz != lapack.TridiagEV && compz != lapack.OriginalEV { + panic(badEVComp) + } + if compz != lapack.None { + if len(work) < max(1, 2*n-2) { + panic(badWork) + } + checkMatrix(n, n, z, ldz) + } + + var icompz int + if compz == lapack.OriginalEV { + icompz = 1 + } else if compz == lapack.TridiagEV { + icompz = 2 + } + + if n == 0 { + return true + } + if n == 1 { + if icompz == 2 { + z[0] = 1 + } + return true + } + + bi := blas64.Implementation() + + eps := dlamchE + eps2 := eps * eps + safmin := dlamchS + safmax := 1 / safmin + ssfmax := math.Sqrt(safmax) / 3 + ssfmin := math.Sqrt(safmin) / eps2 + + // Compute the eigenvalues and eigenvectors of the tridiagonal matrix. + if icompz == 2 { + impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) + } + const maxit = 30 + nmaxit := n * maxit + + jtot := 0 + + // Determine where the matrix splits and choose QL or QR iteration for each + // block, according to whether top or bottom diagonal element is smaller. + l1 := 0 + nm1 := n - 1 + + type scaletype int + const ( + down scaletype = iota + 1 + up + ) + var iscale scaletype + + for { + if l1 > n-1 { + // Order eigenvalues and eigenvectors. + if icompz == 0 { + impl.Dlasrt(lapack.SortIncreasing, n, d) + } else { + // TODO(btracey): Consider replacing this sort with a call to sort.Sort.
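+ // The selection sort below also reorders the eigenvectors: every swap + // of two eigenvalues is mirrored by a Dswap of the corresponding + // columns of z, which is why sorting d alone would not suffice.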
+ for ii := 1; ii < n; ii++ { + i := ii - 1 + k := i + p := d[i] + for j := ii; j < n; j++ { + if d[j] < p { + k = j + p = d[j] + } + } + if k != i { + d[k] = d[i] + d[i] = p + bi.Dswap(n, z[i:], ldz, z[k:], ldz) + } + } + } + return true + } + if l1 > 0 { + e[l1-1] = 0 + } + var m int + if l1 <= nm1 { + for m = l1; m < nm1; m++ { + test := math.Abs(e[m]) + if test == 0 { + break + } + if test <= (math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1])))*eps { + e[m] = 0 + break + } + } + } + l := l1 + lsv := l + lend := m + lendsv := lend + l1 = m + 1 + if lend == l { + continue + } + + // Scale submatrix in rows and columns L to Lend + anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) + switch { + case anorm == 0: + continue + case anorm > ssfmax: + iscale = down + // Pretend that d and e are matrices with 1 column. + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], 1) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], 1) + case anorm < ssfmin: + iscale = up + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], 1) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], 1) + } + + // Choose between QL and QR. + if math.Abs(d[lend]) < math.Abs(d[l]) { + lend = lsv + l = lendsv + } + if lend > l { + // QL Iteration. Look for small subdiagonal element. + for { + if l != lend { + for m = l; m < lend; m++ { + v := math.Abs(e[m]) + if v*v <= (eps2*math.Abs(d[m]))*math.Abs(d[m+1])+safmin { + break + } + } + } else { + m = lend + } + if m < lend { + e[m] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l++ + if l > lend { + break + } + continue + } + + // If remaining matrix is 2×2, use Dlae2 to compute its eigensystem. + if m == l+1 { + if icompz > 0 { + d[l], d[l+1], work[l], work[n-1+l] = impl.Dlaev2(d[l], e[l], d[l+1]) + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, + n, 2, work[l:], work[n-1+l:], z[l:], ldz) + } else { + d[l], d[l+1] = impl.Dlae2(d[l], e[l], d[l+1]) + } + e[l] = 0 + l += 2 + if l > lend { + break + } + continue + } + + if jtot == nmaxit { + break + } + jtot++ + + // Form shift + g := (d[l+1] - p) / (2 * e[l]) + r := impl.Dlapy2(g, 1) + g = d[m] - p + e[l]/(g+math.Copysign(r, g)) + s := 1.0 + c := 1.0 + p = 0.0 + + // Inner loop + for i := m - 1; i >= l; i-- { + f := s * e[i] + b := c * e[i] + c, s, r = impl.Dlartg(g, f) + if i != m-1 { + e[i+1] = r + } + g = d[i+1] - p + r = (d[i]-g)*s + 2*c*b + p = s * r + d[i+1] = g + p + g = c*r - b + + // If eigenvectors are desired, then save rotations. + if icompz > 0 { + work[i] = c + work[n-1+i] = -s + } + } + // If eigenvectors are desired, then apply saved rotations. + if icompz > 0 { + mm := m - l + 1 + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, + n, mm, work[l:], work[n-1+l:], z[l:], ldz) + } + d[l] -= p + e[l] = g + } + } else { + // QR Iteration. + // Look for small superdiagonal element. + for { + if l != lend { + for m = l; m > lend; m-- { + v := math.Abs(e[m-1]) + if v*v <= (eps2*math.Abs(d[m])*math.Abs(d[m-1]) + safmin) { + break + } + } + } else { + m = lend + } + if m > lend { + e[m-1] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found + l-- + if l < lend { + break + } + continue + } + + // If remaining matrix is 2×2, use Dlae2 to compute its eigenvalues. 
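+ // (When icompz > 0, Dlaev2 is called instead of Dlae2 because the + // rotation it returns is also needed; Dlasr then applies it to z.)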
+ if m == l-1 { + if icompz > 0 { + d[l-1], d[l], work[m], work[n-1+m] = impl.Dlaev2(d[l-1], e[l-1], d[l]) + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, + n, 2, work[m:], work[n-1+m:], z[l-1:], ldz) + } else { + d[l-1], d[l] = impl.Dlae2(d[l-1], e[l-1], d[l]) + } + e[l-1] = 0 + l -= 2 + if l < lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + g := (d[l-1] - p) / (2 * e[l-1]) + r := impl.Dlapy2(g, 1) + g = d[m] - p + (e[l-1])/(g+math.Copysign(r, g)) + s := 1.0 + c := 1.0 + p = 0.0 + + // Inner loop. + for i := m; i < l; i++ { + f := s * e[i] + b := c * e[i] + c, s, r = impl.Dlartg(g, f) + if i != m { + e[i-1] = r + } + g = d[i] - p + r = (d[i+1]-g)*s + 2*c*b + p = s * r + d[i] = g + p + g = c*r - b + + // If eigenvectors are desired, then save rotations. + if icompz > 0 { + work[i] = c + work[n-1+i] = s + } + } + + // If eigenvectors are desired, then apply saved rotations. + if icompz > 0 { + mm := l - m + 1 + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, + n, mm, work[m:], work[n-1+m:], z[m:], ldz) + } + d[l] -= p + e[l-1] = g + } + } + + // Undo scaling if necessary. + switch iscale { + case down: + // Pretend that d and e are matrices with 1 column. + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], 1) + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv, 1, e[lsv:], 1) + case up: + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], 1) + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv, 1, e[lsv:], 1) + } + + // Check for no convergence to an eigenvalue after a total of n*maxit iterations. + if jtot >= nmaxit { + break + } + } + for i := 0; i < n-1; i++ { + if e[i] != 0 { + return false + } + } + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go new file mode 100644 index 00000000..636cf1eb --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go @@ -0,0 +1,278 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dsterf computes all eigenvalues of a symmetric tridiagonal matrix using the +// Pal-Walker-Kahan variant of the QL or QR algorithm. +// +// d contains the diagonal elements of the tridiagonal matrix on entry, and +// contains the eigenvalues in ascending order on exit. d must have length at +// least n, or Dsterf will panic. +// +// e contains the off-diagonal elements of the tridiagonal matrix on entry, and is +// overwritten during the call to Dsterf. e must have length at least n-1, or +// Dsterf will panic. +// +// Dsterf is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsterf(n int, d, e []float64) (ok bool) { + if n < 0 { + panic(nLT0) + } + if n == 0 { + return true + } + if len(d) < n { + panic(badD) + } + if len(e) < n-1 { + panic(badE) + } + + const ( + none = 0 // The values are not scaled. + down = 1 // The values are scaled below ssfmax threshold. + up = 2 // The values are scaled below ssfmin threshold. + ) + + // Determine the unit roundoff for this environment.
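+ // eps2 feeds the squared deflation test below, while ssfmin and ssfmax + // bound the range in which a submatrix can be processed without scaling.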
+ eps := dlamchE + eps2 := eps * eps + safmin := dlamchS + safmax := 1 / safmin + ssfmax := math.Sqrt(safmax) / 3 + ssfmin := math.Sqrt(safmin) / eps2 + + // Compute the eigenvalues of the tridiagonal matrix. + maxit := 30 + nmaxit := n * maxit + jtot := 0 + + l1 := 0 + + for { + if l1 > n-1 { + impl.Dlasrt(lapack.SortIncreasing, n, d) + return true + } + if l1 > 0 { + e[l1-1] = 0 + } + var m int + for m = l1; m < n-1; m++ { + if math.Abs(e[m]) <= math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1]))*eps { + e[m] = 0 + break + } + } + + l := l1 + lsv := l + lend := m + lendsv := lend + l1 = m + 1 + if lend == l { + continue + } + + // Scale submatrix in rows and columns l to lend. + anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) + iscale := none + if anorm == 0 { + continue + } + if anorm > ssfmax { + iscale = down + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], n) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], n) + } else if anorm < ssfmin { + iscale = up + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], n) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], n) + } + + el := e[l:lend] + for i, v := range el { + el[i] *= v + } + + // Choose between QL and QR iteration. + if math.Abs(d[lend]) < math.Abs(d[l]) { + lend = lsv + l = lendsv + } + if lend >= l { + // QL Iteration. + // Look for small sub-diagonal element. + for { + if l != lend { + for m = l; m < lend; m++ { + if math.Abs(e[m]) <= eps2*(math.Abs(d[m]*d[m+1])) { + break + } + } + } else { + m = lend + } + if m < lend { + e[m] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l++ + if l > lend { + break + } + continue + } + // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. + if m == l+1 { + d[l], d[l+1] = impl.Dlae2(d[l], math.Sqrt(e[l]), d[l+1]) + e[l] = 0 + l += 2 + if l > lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + rte := math.Sqrt(e[l]) + sigma := (d[l+1] - p) / (2 * rte) + r := impl.Dlapy2(sigma, 1) + sigma = p - (rte / (sigma + math.Copysign(r, sigma))) + + c := 1.0 + s := 0.0 + gamma := d[m] - sigma + p = gamma * gamma + + // Inner loop. + for i := m - 1; i >= l; i-- { + bb := e[i] + r := p + bb + if i != m-1 { + e[i+1] = s * r + } + oldc := c + c = p / r + s = bb / r + oldgam := gamma + alpha := d[i] + gamma = c*(alpha-sigma) - s*oldgam + d[i+1] = oldgam + (alpha - gamma) + if c != 0 { + p = (gamma * gamma) / c + } else { + p = oldc * bb + } + } + e[l] = s * p + d[l] = sigma + gamma + } + } else { + for { + // QR Iteration. + // Look for small super-diagonal element. + for m = l; m > lend; m-- { + if math.Abs(e[m-1]) <= eps2*math.Abs(d[m]*d[m-1]) { + break + } + } + if m > lend { + e[m-1] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l-- + if l < lend { + break + } + continue + } + + // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. + if m == l-1 { + d[l], d[l-1] = impl.Dlae2(d[l], math.Sqrt(e[l-1]), d[l-1]) + e[l-1] = 0 + l -= 2 + if l < lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + rte := math.Sqrt(e[l-1]) + sigma := (d[l-1] - p) / (2 * rte) + r := impl.Dlapy2(sigma, 1) + sigma = p - (rte / (sigma + math.Copysign(r, sigma))) + + c := 1.0 + s := 0.0 + gamma := d[m] - sigma + p = gamma * gamma + + // Inner loop.
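+ // The Pal-Walker-Kahan recurrence operates on the squared off-diagonal + // elements (e was squared above), so the sweep needs no square roots.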
+ for i := m; i < l; i++ { + bb := e[i] + r := p + bb + if i != m { + e[i-1] = s * r + } + oldc := c + c = p / r + s = bb / r + oldgam := gamma + alpha := d[i+1] + gamma = c*(alpha-sigma) - s*oldgam + d[i] = oldgam + alpha - gamma + if c != 0 { + p = (gamma * gamma) / c + } else { + p = oldc * bb + } + } + e[l-1] = s * p + d[l] = sigma + gamma + } + } + + // Undo scaling if necessary + switch iscale { + case down: + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], n) + case up: + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], n) + } + + // Check for no convergence to an eigenvalue after a total of n*maxit iterations. + if jtot >= nmaxit { + break + } + } + for _, v := range e[:n-1] { + if v != 0 { + return false + } + } + impl.Dlasrt(lapack.SortIncreasing, n, d) + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go new file mode 100644 index 00000000..b4c20c75 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dsyev computes all eigenvalues and, optionally, the eigenvectors of a real +// symmetric matrix A. +// +// w contains the eigenvalues in ascending order upon return. w must have length +// at least n, and Dsyev will panic otherwise. +// +// On entry, a contains the elements of the symmetric matrix A in the triangular +// portion specified by uplo. If jobz == lapack.ComputeEV a contains the +// orthonormal eigenvectors of A on exit, otherwise on exit the specified +// triangular region is overwritten. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 3*n-1, and Dsyev will panic otherwise. The amount of blocking is +// limited by the usable length. If lwork == -1, instead of computing Dsyev the +// optimal work length is stored into work[0]. +func (impl Implementation) Dsyev(jobz lapack.EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool) { + checkMatrix(n, n, a, lda) + upper := uplo == blas.Upper + wantz := jobz == lapack.ComputeEV + var opts string + if upper { + opts = "U" + } else { + opts = "L" + } + nb := impl.Ilaenv(1, "DSYTRD", opts, n, -1, -1, -1) + lworkopt := max(1, (nb+2)*n) + work[0] = float64(lworkopt) + if lwork == -1 { + return + } + if len(work) < lwork { + panic(badWork) + } + if lwork < 3*n-1 { + panic(badWork) + } + if n == 0 { + return true + } + if n == 1 { + w[0] = a[0] + work[0] = 2 + if wantz { + a[0] = 1 + } + return true + } + safmin := dlamchS + eps := dlamchP + smlnum := safmin / eps + bignum := 1 / smlnum + rmin := math.Sqrt(smlnum) + rmax := math.Sqrt(bignum) + + // Scale matrix to allowable range, if necessary. 
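+ // If the max-abs norm of A lies outside [rmin, rmax], A is scaled by + // sigma so the reduction cannot overflow or underflow; the eigenvalues + // are scaled back by 1/sigma once computed.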
+ anrm := impl.Dlansy(lapack.MaxAbs, uplo, n, a, lda, work) + scaled := false + var sigma float64 + if anrm > 0 && anrm < rmin { + scaled = true + sigma = rmin / anrm + } else if anrm > rmax { + scaled = true + sigma = rmax / anrm + } + if scaled { + kind := lapack.LowerTri + if upper { + kind = lapack.UpperTri + } + impl.Dlascl(kind, 0, 0, 1, sigma, n, n, a, lda) + } + var inde int + indtau := inde + n + indwork := indtau + n + llwork := lwork - indwork + impl.Dsytrd(uplo, n, a, lda, w, work[inde:], work[indtau:], work[indwork:], llwork) + + // For eigenvalues only, call Dsterf. For eigenvectors, first call Dorgtr + // to generate the orthogonal matrix, then call Dsteqr. + if !wantz { + ok = impl.Dsterf(n, w, work[inde:]) + } else { + impl.Dorgtr(uplo, n, a, lda, work[indtau:], work[indwork:], llwork) + ok = impl.Dsteqr(lapack.EVComp(jobz), n, w, work[inde:], a, lda, work[indtau:]) + } + if !ok { + return false + } + + // If the matrix was scaled, then rescale eigenvalues appropriately. + if scaled { + bi := blas64.Implementation() + bi.Dscal(n, 1/sigma, w, 1) + } + work[0] = float64(lworkopt) + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go new file mode 100644 index 00000000..b6dc60c0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go @@ -0,0 +1,123 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dsytd2 reduces a symmetric n×n matrix A to symmetric tridiagonal form T by an +// orthogonal similarity transformation +// Q^T * A * Q = T +// On entry, the matrix is contained in the specified triangle of a. On exit, +// if uplo == blas.Upper, the diagonal and first super-diagonal of a are +// overwritten with the elements of T. The elements above the first super-diagonal +// are overwritten with the elementary reflectors that are used with the +// elements written to tau in order to construct Q. If uplo == blas.Lower, the +// elements are written in the lower triangular region. +// +// d must have length at least n. e and tau must have length at least n-1. Dsytd2 +// will panic if these sizes are not met. +// +// Q is represented as a product of elementary reflectors. +// If uplo == blas.Upper +// Q = H_{n-2} * ... * H_1 * H_0 +// and if uplo == blas.Lower +// Q = H_0 * H_1 * ... * H_{n-2} +// where +// H_i = I - tau * v * v^T +// where tau is stored in tau[i], and v is stored in a. +// +// If uplo == blas.Upper, v[0:i-1] is stored in A[0:i-1,i+1], v[i] = 1, and +// v[i+1:] = 0. The elements of a are +// [ d e v2 v3 v4] +// [ d e v3 v4] +// [ d e v4] +// [ d e] +// [ d] +// If uplo == blas.Lower, v[0:i+1] = 0, v[i+1] = 1, and v[i+2:] is stored in +// A[i+2:n,i]. +// The elements of a are +// [ d ] +// [ e d ] +// [v1 e d ] +// [v1 v2 e d ] +// [v1 v2 v3 e d] +// +// Dsytd2 is an internal routine. It is exported for testing purposes.
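+// +// Q itself is typically not formed at this level; Dorgtr assembles it from +// the reflectors stored in a and tau after the reduction, as Dsyev does when +// eigenvectors are requested.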
+func (impl Implementation) Dsytd2(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau []float64) { + checkMatrix(n, n, a, lda) + if len(d) < n { + panic(badD) + } + if len(e) < n-1 { + panic(badE) + } + if len(tau) < n-1 { + panic(badTau) + } + if n <= 0 { + return + } + bi := blas64.Implementation() + if uplo == blas.Upper { + // Reduce the upper triangle of A. + for i := n - 2; i >= 0; i-- { + // Generate elementary reflector H_i = I - tau * v * v^T to + // annihilate A[0:i, i+1]. + var taui float64 + a[i*lda+i+1], taui = impl.Dlarfg(i+1, a[i*lda+i+1], a[i+1:], lda) + e[i] = a[i*lda+i+1] + if taui != 0 { + // Apply H_i from both sides to A[0:i,0:i]. + a[i*lda+i+1] = 1 + + // Compute x := tau * A * v, storing x in tau[0:i]. + bi.Dsymv(uplo, i+1, taui, a, lda, a[i+1:], lda, 0, tau, 1) + + // Compute w := x - 1/2 * tau * (x^T * v) * v. + alpha := -0.5 * taui * bi.Ddot(i+1, tau, 1, a[i+1:], lda) + bi.Daxpy(i+1, alpha, a[i+1:], lda, tau, 1) + + // Apply the transformation as a rank-2 update + // A = A - v * w^T - w * v^T. + bi.Dsyr2(uplo, i+1, -1, a[i+1:], lda, tau, 1, a, lda) + a[i*lda+i+1] = e[i] + } + d[i+1] = a[(i+1)*lda+i+1] + tau[i] = taui + } + d[0] = a[0] + return + } + // Reduce the lower triangle of A. + for i := 0; i < n-1; i++ { + // Generate elementary reflector H_i = I - tau * v * v^T to + // annihilate A[i+2:n, i]. + var taui float64 + a[(i+1)*lda+i], taui = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + if taui != 0 { + // Apply H_i from both sides to A[i+1:n, i+1:n]. + a[(i+1)*lda+i] = 1 + + // Compute x := tau * A * v, storing x in tau[i:n-1]. + bi.Dsymv(uplo, n-i-1, taui, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, tau[i:], 1) + + // Compute w := x - 1/2 * tau * (x^T * v) * v. + alpha := -0.5 * taui * bi.Ddot(n-i-1, tau[i:], 1, a[(i+1)*lda+i:], lda) + bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda, tau[i:], 1) + + // Apply the transformation as a rank-2 update + // A = A - v * w^T - w * v^T. + bi.Dsyr2(uplo, n-i-1, -1, a[(i+1)*lda+i:], lda, tau[i:], 1, a[(i+1)*lda+i+1:], lda) + a[(i+1)*lda+i] = e[i] + } + d[i] = a[i*lda+i] + tau[i] = taui + } + d[n-1] = a[(n-1)*lda+n-1] +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go new file mode 100644 index 00000000..b079140c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go @@ -0,0 +1,178 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dsytrd reduces a symmetric n×n matrix A to symmetric tridiagonal form by an +// orthogonal similarity transformation +// Q^T * A * Q = T +// where Q is an orthonormal matrix and T is symmetric and tridiagonal. +// +// On entry, a contains the elements of the input matrix in the triangle specified +// by uplo. On exit, the diagonal and sub/super-diagonal are overwritten by the +// corresponding elements of the tridiagonal matrix T. The remaining elements in +// the triangle, along with the array tau, contain the data to construct Q as +// the product of elementary reflectors. +// +// If uplo == blas.Upper, Q is constructed with +// Q = H_{n-2} * ...
* H_1 * H_0 +// where +// H_i = I - tau_i * v * v^T +// v is constructed as v[i+1:n] = 0, v[i] = 1, v[0:i-1] is stored in A[0:i-1, i+1]. +// The elements of A are +// [ d e v1 v2 v3] +// [ d e v2 v3] +// [ d e v3] +// [ d e] +// [ d] +// +// If uplo == blas.Lower, Q is constructed with +// Q = H_0 * H_1 * ... * H_{n-2} +// where +// H_i = I - tau_i * v * v^T +// v is constructed as v[0:i+1] = 0, v[i+1] = 1, v[i+2:n] is stored in A[i+2:n, i]. +// The elements of A are +// [ d ] +// [ e d ] +// [v0 e d ] +// [v0 v1 e d ] +// [v0 v1 v2 e d] +// +// d must have length n, and e and tau must have length n-1. Dsytrd will panic if +// these conditions are not met. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 1, and Dsytrd will panic otherwise. The amount of blocking is +// limited by the usable length. +// If lwork == -1, instead of computing Dsytrd the optimal work length is stored +// into work[0]. +// +// Dsytrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsytrd(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau, work []float64, lwork int) { + checkMatrix(n, n, a, lda) + if len(d) < n { + panic(badD) + } + if len(e) < n-1 { + panic(badE) + } + if len(tau) < n-1 { + panic(badTau) + } + if len(work) < lwork { + panic(shortWork) + } + if lwork != -1 && lwork < 1 { + panic(badWork) + } + + var upper bool + var opts string + switch uplo { + case blas.Upper: + upper = true + opts = "U" + case blas.Lower: + opts = "L" + default: + panic(badUplo) + } + + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DSYTRD", opts, n, -1, -1, -1) + lworkopt := n * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + nx := n + bi := blas64.Implementation() + var ldwork int + if 1 < nb && nb < n { + // Determine when to cross over from blocked to unblocked code. The last + // block is always handled by unblocked code. + opts := "L" + if upper { + opts = "U" + } + nx = max(nb, impl.Ilaenv(3, "DSYTRD", opts, n, -1, -1, -1)) + if nx < n { + // Determine if workspace is large enough for blocked code. + ldwork = nb + iws := n * ldwork + if lwork < iws { + // Not enough workspace to use optimal nb: determine the minimum + // value of nb and reduce nb or force use of unblocked code by + // setting nx = n. + nb = max(lwork/n, 1) + nbmin := impl.Ilaenv(2, "DSYTRD", opts, n, -1, -1, -1) + if nb < nbmin { + nx = n + } + } + } else { + nx = n + } + } else { + nb = 1 + } + ldwork = nb + + if upper { + // Reduce the upper triangle of A. Columns 0:kk are handled by the + // unblocked method. + var i int + kk := n - ((n-nx+nb-1)/nb)*nb + for i = n - nb; i >= kk; i -= nb { + // Reduce columns i:i+nb to tridiagonal form and form the matrix W + // which is needed to update the unreduced part of the matrix. + impl.Dlatrd(uplo, i+nb, nb, a, lda, e, tau, work, ldwork) + + // Update the unreduced submatrix A[0:i-1,0:i-1], using an update + // of the form A = A - V*W^T - W*V^T. + bi.Dsyr2k(uplo, blas.NoTrans, i, nb, -1, a[i:], lda, work, ldwork, 1, a, lda) + + // Copy superdiagonal elements back into A, and diagonal elements into D. + for j := i; j < i+nb; j++ { + a[(j-1)*lda+j] = e[j-1] + d[j] = a[j*lda+j] + } + } + // Use unblocked code to reduce the last or only block + // check that i == kk. + impl.Dsytd2(uplo, kk, a, lda, d, e, tau) + } else { + var i int + // Reduce the lower triangle of A.
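+ // This branch mirrors the upper-triangular one: panels of nb columns are + // reduced by Dlatrd, and the trailing submatrix is then updated with a + // single symmetric rank-2k update.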
+ for i = 0; i < n-nx; i += nb { + // Reduce columns 0:i+nb to tridiagonal form and form the matrix W + // which is needed to update the unreduced part of the matrix. + impl.Dlatrd(uplo, n-i, nb, a[i*lda+i:], lda, e[i:], tau[i:], work, ldwork) + + // Update the unreduced submatrix A[i+nb:n, i+nb:n], using an update + // of the form A = A - V*W^T - W*V^T. + bi.Dsyr2k(uplo, blas.NoTrans, n-i-nb, nb, -1, a[(i+nb)*lda+i:], lda, + work[nb*ldwork:], ldwork, 1, a[(i+nb)*lda+i+nb:], lda) + + // Copy subdiagonal elements back into A, and diagonal elements into D. + for j := i; j < i+nb; j++ { + a[(j+1)*lda+j] = e[j] + d[j] = a[j*lda+j] + } + } + // Use unblocked code to reduce the last or only block. + impl.Dsytd2(uplo, n-i, a[i*lda+i:], lda, d[i:], e[i:], tau[i:]) + } + work[0] = float64(lworkopt) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go new file mode 100644 index 00000000..8a1beefe --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go @@ -0,0 +1,357 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtgsja computes the generalized singular value decomposition (GSVD) +// of two real upper triangular or trapezoidal matrices A and B. +// +// A and B have the following forms, which may be obtained by the +// preprocessing subroutine Dggsvp from a general m×n matrix A and p×n +// matrix B: +// +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l >= 0; +// l [ 0 0 A23 ] +// m-k-l [ 0 0 0 ] +// +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l < 0; +// m-k [ 0 0 A23 ] +// +// n-k-l k l +// B = l [ 0 0 B13 ] +// p-l [ 0 0 0 ] +// +// where the k×k matrix A12 and l×l matrix B13 are non-singular +// upper triangular. A23 is l×l upper triangular if m-k-l >= 0, +// otherwise A23 is (m-k)×l upper trapezoidal. +// +// On exit, +// +// U^T*A*Q = D1*[ 0 R ], V^T*B*Q = D2*[ 0 R ], +// +// where U, V and Q are orthogonal matrices. +// R is a non-singular upper triangular matrix, and D1 and D2 are +// diagonal matrices, which are of the following structures: +// +// If m-k-l >= 0, +// +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] +// +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] +// +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l +// +// where +// +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. +// +// R is stored in +// A[0:k+l, n-k-l:n] +// on exit. +// +// If m-k-l < 0, +// +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] +// +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] +// +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] +// +// where +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[0:m, n-k-l:n] +// [ 0 R22 R23 ] +// and R33 is stored in +// B[m-k:l, n+m-k-l:n] on exit. +// +// The computation of the orthogonal transformation matrices U, V or Q +// is optional. These matrices may either be formed explicitly, or they +// may be post-multiplied into input matrices U1, V1, or Q1.
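+// (Choosing lapack.GSVDUnit initializes the corresponding matrix to the +// identity first, so only the accumulated rotations are returned.)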
+// +// Dtgsja essentially uses a variant of the Kogbetliantz algorithm to reduce +// min(l,m-k)×l triangular or trapezoidal matrix A23 and l×l +// matrix B13 to the form: +// +// U1^T*A13*Q1 = C1*R1; V1^T*B13*Q1 = S1*R1, +// +// where U1, V1 and Q1 are orthogonal matrices. C1 and S1 are diagonal +// matrices satisfying +// +// C1^2 + S1^2 = I, +// +// and R1 is an l×l non-singular upper triangular matrix. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDUnit Use unit-initialized matrix +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// k and l specify the sub-blocks in the input matrices A and B: +// A23 = A[k:min(k+l,m), n-l:n] and B13 = B[0:l, n-l:n] +// of A and B, whose GSVD is going to be computed by Dtgsja. +// +// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz +// iteration procedure. Generally, they are the same as used in the preprocessing +// step, for example, +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps, +// where eps is the machine epsilon. +// +// work must have length at least 2*n, otherwise Dtgsja will panic. +// +// alpha and beta must have length n or Dtgsja will panic. On exit, alpha and +// beta contain the generalized singular value pairs of A and B +// alpha[0:k] = 1, +// beta[0:k] = 0, +// if m-k-l >= 0, +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// if m-k-l < 0, +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// if k+l < n, +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. +// +// On exit, A[n-k:n, 0:min(k+l,m)] contains the triangular matrix R or part of R +// and if necessary, B[m-k:l, n+m-k-l:n] contains a part of R. +// +// Dtgsja returns whether the routine converged and the number of iteration cycles +// that were run. +// +// Dtgsja is an internal routine. It is exported for testing purposes.
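+// +// A minimal allocation sketch (the sizes follow the constraints above; the +// remaining arguments are assumed to come from a preprocessing step such as +// Dggsvp and are illustrative, not values from this file): +// alpha := make([]float64, n) +// beta := make([]float64, n) +// work := make([]float64, 2*n) +// cycles, ok := impl.Dtgsja(lapack.GSVDUnit, lapack.GSVDUnit, lapack.GSVDUnit, +// m, p, n, k, l, a, lda, b, ldb, tola, tolb, alpha, beta, u, ldu, v, ldv, q, ldq, work)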
+func (impl Implementation) Dtgsja(jobU, jobV, jobQ lapack.GSVDJob, m, p, n, k, l int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64) (cycles int, ok bool) { + const maxit = 40 + + checkMatrix(m, n, a, lda) + checkMatrix(p, n, b, ldb) + + if len(alpha) != n { + panic(badAlpha) + } + if len(beta) != n { + panic(badBeta) + } + + initu := jobU == lapack.GSVDUnit + wantu := initu || jobU == lapack.GSVDU + if !initu && !wantu && jobU != lapack.GSVDNone { + panic(badGSVDJob + "U") + } + if jobU != lapack.GSVDNone { + checkMatrix(m, m, u, ldu) + } + + initv := jobV == lapack.GSVDUnit + wantv := initv || jobV == lapack.GSVDV + if !initv && !wantv && jobV != lapack.GSVDNone { + panic(badGSVDJob + "V") + } + if jobV != lapack.GSVDNone { + checkMatrix(p, p, v, ldv) + } + + initq := jobQ == lapack.GSVDUnit + wantq := initq || jobQ == lapack.GSVDQ + if !initq && !wantq && jobQ != lapack.GSVDNone { + panic(badGSVDJob + "Q") + } + if jobQ != lapack.GSVDNone { + checkMatrix(n, n, q, ldq) + } + + if len(work) < 2*n { + panic(badWork) + } + + // Initialize U, V and Q, if necessary + if initu { + impl.Dlaset(blas.All, m, m, 0, 1, u, ldu) + } + if initv { + impl.Dlaset(blas.All, p, p, 0, 1, v, ldv) + } + if initq { + impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) + } + + bi := blas64.Implementation() + minTol := math.Min(tola, tolb) + + // Loop until convergence. + upper := false + for cycles = 1; cycles <= maxit; cycles++ { + upper = !upper + + for i := 0; i < l-1; i++ { + for j := i + 1; j < l; j++ { + var a1, a2, a3 float64 + if k+i < m { + a1 = a[(k+i)*lda+n-l+i] + } + if k+j < m { + a3 = a[(k+j)*lda+n-l+j] + } + + b1 := b[i*ldb+n-l+i] + b3 := b[j*ldb+n-l+j] + + var b2 float64 + if upper { + if k+i < m { + a2 = a[(k+i)*lda+n-l+j] + } + b2 = b[i*ldb+n-l+j] + } else { + if k+j < m { + a2 = a[(k+j)*lda+n-l+i] + } + b2 = b[j*ldb+n-l+i] + } + + csu, snu, csv, snv, csq, snq := impl.Dlags2(upper, a1, a2, a3, b1, b2, b3) + + // Update (k+i)-th and (k+j)-th rows of matrix A: U^T*A. + if k+j < m { + bi.Drot(l, a[(k+j)*lda+n-l:], 1, a[(k+i)*lda+n-l:], 1, csu, snu) + } + + // Update i-th and j-th rows of matrix B: V^T*B. + bi.Drot(l, b[j*ldb+n-l:], 1, b[i*ldb+n-l:], 1, csv, snv) + + // Update (n-l+i)-th and (n-l+j)-th columns of matrices + // A and B: A*Q and B*Q. + bi.Drot(min(k+l, m), a[n-l+j:], lda, a[n-l+i:], lda, csq, snq) + bi.Drot(l, b[n-l+j:], ldb, b[n-l+i:], ldb, csq, snq) + + if upper { + if k+i < m { + a[(k+i)*lda+n-l+j] = 0 + } + b[i*ldb+n-l+j] = 0 + } else { + if k+j < m { + a[(k+j)*lda+n-l+i] = 0 + } + b[j*ldb+n-l+i] = 0 + } + + // Update orthogonal matrices U, V, Q, if desired. + if wantu && k+j < m { + bi.Drot(m, u[k+j:], ldu, u[k+i:], ldu, csu, snu) + } + if wantv { + bi.Drot(p, v[j:], ldv, v[i:], ldv, csv, snv) + } + if wantq { + bi.Drot(n, q[n-l+j:], ldq, q[n-l+i:], ldq, csq, snq) + } + } + } + + if !upper { + // The matrices A13 and B13 were lower triangular at the start + // of the cycle, and are now upper triangular. + // + // Convergence test: test the parallelism of the corresponding + // rows of A and B. + var error float64 + for i := 0; i < min(l, m-k); i++ { + bi.Dcopy(l-i, a[(k+i)*lda+n-l+i:], 1, work, 1) + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, work[l:], 1) + ssmin := impl.Dlapll(l-i, work, 1, work[l:], 1) + error = math.Max(error, ssmin) + } + if math.Abs(error) <= minTol { + // The algorithm has converged. 
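+ // (Convergence means that the corresponding rows of A and B are + // parallel to within minTol, as measured by Dlapll above.)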
+ // Compute the generalized singular value pairs (alpha, beta) + // and set the triangular matrix R to array A. + for i := 0; i < k; i++ { + alpha[i] = 1 + beta[i] = 0 + } + + for i := 0; i < min(l, m-k); i++ { + a1 := a[(k+i)*lda+n-l+i] + b1 := b[i*ldb+n-l+i] + + if a1 != 0 { + gamma := b1 / a1 + + // Change sign if necessary. + if gamma < 0 { + bi.Dscal(l-i, -1, b[i*ldb+n-l+i:], 1) + if wantv { + bi.Dscal(p, -1, v[i:], ldv) + } + } + beta[k+i], alpha[k+i], _ = impl.Dlartg(math.Abs(gamma), 1) + + if alpha[k+i] >= beta[k+i] { + bi.Dscal(l-i, 1/alpha[k+i], a[(k+i)*lda+n-l+i:], 1) + } else { + bi.Dscal(l-i, 1/beta[k+i], b[i*ldb+n-l+i:], 1) + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) + } + } else { + alpha[k+i] = 0 + beta[k+i] = 1 + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) + } + } + + for i := m; i < k+l; i++ { + alpha[i] = 0 + beta[i] = 1 + } + if k+l < n { + for i := k + l; i < n; i++ { + alpha[i] = 0 + beta[i] = 0 + } + } + + return cycles, true + } + } + } + + // The algorithm has not converged after maxit cycles. + return cycles, false +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go new file mode 100644 index 00000000..42d9648f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go @@ -0,0 +1,82 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtrcon estimates the reciprocal of the condition number of a triangular matrix A. +// The condition number computed may be based on the 1-norm or the ∞-norm. +// +// work is a temporary data slice of length at least 3*n and Dtrcon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dtrcon will panic otherwise. 
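+// +// A minimal usage sketch (n, a and lda are illustrative): +// work := make([]float64, 3*n) +// iwork := make([]int, n) +// rcond := impl.Dtrcon(lapack.MaxColumnSum, blas.Upper, blas.NonUnit, n, a, lda, work, iwork)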
+func (impl Implementation) Dtrcon(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64 { + if norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum { + panic(badNorm) + } + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if diag != blas.NonUnit && diag != blas.Unit { + panic(badDiag) + } + if len(work) < 3*n { + panic(badWork) + } + if len(iwork) < n { + panic(badWork) + } + if n == 0 { + return 1 + } + bi := blas64.Implementation() + + var rcond float64 + smlnum := dlamchS * float64(n) + + anorm := impl.Dlantr(norm, uplo, diag, n, n, a, lda, work) + + if anorm <= 0 { + return rcond + } + var ainvnm float64 + var normin bool + kase1 := 2 + if norm == lapack.MaxColumnSum { + kase1 = 1 + } + var kase int + isave := new([3]int) + var scale float64 + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / anorm) / ainvnm + } + return rcond + } + if kase == kase1 { + scale = impl.Dlatrs(uplo, blas.NoTrans, diag, normin, n, a, lda, work, work[2*n:]) + } else { + scale = impl.Dlatrs(uplo, blas.Trans, diag, normin, n, a, lda, work, work[2*n:]) + } + normin = true + if scale != 1 { + ix := bi.Idamax(n, work, 1) + xnorm := math.Abs(work[ix]) + if scale == 0 || scale < xnorm*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go new file mode 100644 index 00000000..6e9a3354 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go @@ -0,0 +1,865 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtrevc3 computes some or all of the right and/or left eigenvectors of an n×n +// upper quasi-triangular matrix T in Schur canonical form. Matrices of this +// type are produced by the Schur factorization of a real general matrix A +// A = Q T Q^T, +// as computed by Dhseqr. +// +// The right eigenvector x of T corresponding to an +// eigenvalue λ is defined by +// T x = λ x, +// and the left eigenvector y is defined by +// y^T T = λ y^T. +// +// The eigenvalues are read directly from the diagonal blocks of T. +// +// This routine returns the matrices X and/or Y of right and left eigenvectors +// of T, or the products Q*X and/or Q*Y, where Q is an input matrix. If Q is the +// orthogonal factor that reduces a matrix A to Schur form T, then Q*X and Q*Y +// are the matrices of right and left eigenvectors of A. +// +// If side == lapack.RightEV, only right eigenvectors will be computed. +// If side == lapack.LeftEV, only left eigenvectors will be computed. +// If side == lapack.RightLeftEV, both right and left eigenvectors will be computed. +// For other values of side, Dtrevc3 will panic. +// +// If howmny == lapack.AllEV, all right and/or left eigenvectors will be +// computed. +// If howmny == lapack.AllEVMulQ, all right and/or left eigenvectors will be +// computed and multiplied from left by the matrices in VR and/or VL. +// If howmny == lapack.SelectedEV, right and/or left eigenvectors will be +// computed as indicated by selected. 
+// For other values of howmny, Dtrevc3 will panic. +// +// selected specifies which eigenvectors will be computed. It must have length n +// if howmny == lapack.SelectedEV, and it is not referenced otherwise. +// If w_j is a real eigenvalue, the corresponding real eigenvector will be +// computed if selected[j] is true. +// If w_j and w_{j+1} are the real and imaginary parts of a complex eigenvalue, +// the corresponding complex eigenvector is computed if either selected[j] or +// selected[j+1] is true, and on return selected[j] will be set to true and +// selected[j+1] will be set to false. +// +// VL and VR are n×mm matrices. If howmny is lapack.AllEV or +// lapack.AllEVMulQ, mm must be at least n. If howmny == +// lapack.SelectedEV, mm must be large enough to store the selected +// eigenvectors. Each selected real eigenvector occupies one column and each +// selected complex eigenvector occupies two columns. If mm is not sufficiently +// large, Dtrevc3 will panic. +// +// On entry, if howmny == lapack.AllEVMulQ, it is assumed that VL (if side +// is lapack.LeftEV or lapack.RightLeftEV) contains an n×n matrix QL, +// and that VR (if side is lapack.RightEV or lapack.RightLeftEV) contains +// an n×n matrix QR. QL and QR are typically the orthogonal matrix Q of Schur +// vectors returned by Dhseqr. +// +// On return, if side is lapack.LeftEV or lapack.RightLeftEV, +// VL will contain: +// if howmny == lapack.AllEV, the matrix Y of left eigenvectors of T, +// if howmny == lapack.AllEVMulQ, the matrix Q*Y, +// if howmny == lapack.SelectedEV, the left eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VL, in the same order as their +// eigenvalues. +// VL is not referenced if side == lapack.RightEV. +// +// On return, if side is lapack.RightEV or lapack.RightLeftEV, +// VR will contain: +// if howmny == lapack.AllEV, the matrix X of right eigenvectors of T, +// if howmny == lapack.AllEVMulQ, the matrix Q*X, +// if howmny == lapack.SelectedEV, the right eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VR, in the same order as their +// eigenvalues. +// VR is not referenced if side == lapack.LeftEV. +// +// Complex eigenvectors corresponding to a complex eigenvalue are stored in VL +// and VR in two consecutive columns, the first holding the real part, and the +// second the imaginary part. +// +// Each eigenvector will be normalized so that the element of largest magnitude +// has magnitude 1. Here the magnitude of a complex number (x,y) is taken to be +// |x| + |y|. +// +// work must have length at least lwork and lwork must be at least max(1,3*n), +// otherwise Dtrevc3 will panic. For optimum performance, lwork should be at +// least n+2*n*nb, where nb is the optimal blocksize. +// +// If lwork == -1, instead of performing Dtrevc3, the function only estimates +// the optimal workspace size based on n and stores it into work[0]. +// +// Dtrevc3 returns the number of columns in VL and/or VR actually used to store +// the eigenvectors. +// +// Dtrevc3 is an internal routine. It is exported for testing purposes.
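+// +// A workspace-query sketch following the convention above (all other names +// are illustrative): +// var query [1]float64 +// impl.Dtrevc3(side, howmny, selected, n, t, ldt, vl, ldvl, vr, ldvr, mm, query[:], -1) +// work := make([]float64, int(query[0]))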
+func (impl Implementation) Dtrevc3(side lapack.EVSide, howmny lapack.HowMany, selected []bool, n int, t []float64, ldt int, vl []float64, ldvl int, vr []float64, ldvr int, mm int, work []float64, lwork int) (m int) { + switch side { + default: + panic(badEVSide) + case lapack.RightEV, lapack.LeftEV, lapack.RightLeftEV: + } + switch howmny { + default: + panic(badHowMany) + case lapack.AllEV, lapack.AllEVMulQ, lapack.SelectedEV: + } + switch { + case n < 0: + panic(nLT0) + case len(work) < lwork: + panic(shortWork) + case lwork < max(1, 3*n) && lwork != -1: + panic(badWork) + } + if lwork != -1 { + if howmny == lapack.SelectedEV { + if len(selected) != n { + panic("lapack: bad selected length") + } + // Set m to the number of columns required to store the + // selected eigenvectors, and standardize the slice + // selected. + for j := 0; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // Diagonal 1×1 block corresponding to a + // real eigenvalue. + if selected[j] { + m++ + } + j++ + } else { + // Diagonal 2×2 block corresponding to a + // complex eigenvalue. + if selected[j] || selected[j+1] { + selected[j] = true + selected[j+1] = false + m += 2 + } + j += 2 + } + } + } else { + m = n + } + if m > mm { + panic("lapack: insufficient number of columns") + } + checkMatrix(n, n, t, ldt) + if (side == lapack.RightEV || side == lapack.RightLeftEV) && m > 0 { + checkMatrix(n, m, vr, ldvr) + } + if (side == lapack.LeftEV || side == lapack.RightLeftEV) && m > 0 { + checkMatrix(n, m, vl, ldvl) + } + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return m + } + + const ( + nbmin = 8 + nbmax = 128 + ) + nb := impl.Ilaenv(1, "DTREVC", string(side)+string(howmny), n, -1, -1, -1) + + // Quick return in case of a workspace query. + if lwork == -1 { + work[0] = float64(n + 2*n*nb) + return m + } + + // Use blocked version of back-transformation if sufficient workspace. + // Zero-out the workspace to avoid potential NaN propagation. + if howmny == lapack.AllEVMulQ && lwork >= n+2*n*nbmin { + nb = min((lwork-n)/(2*n), nbmax) + impl.Dlaset(blas.All, n, 1+2*nb, 0, 0, work[:n+2*nb*n], 1+2*nb) + } else { + nb = 1 + } + + // Set the constants to control overflow. + ulp := dlamchP + smlnum := float64(n) / ulp * dlamchS + bignum := (1 - ulp) / smlnum + + // Split work into a vector of column norms and an n×2*nb matrix b. + norms := work[:n] + ldb := 2 * nb + b := work[n : n+n*ldb] + + // Compute 1-norm of each column of strictly upper triangular part of T + // to control overflow in triangular solver. + norms[0] = 0 + for j := 1; j < n; j++ { + var cn float64 + for i := 0; i < j; i++ { + cn += math.Abs(t[i*ldt+j]) + } + norms[j] = cn + } + + bi := blas64.Implementation() + + var ( + x [4]float64 + + iv int // Index of column in current block. + is int + + // ip is used below to specify the real or complex eigenvalue: + // ip == 0, real eigenvalue, + // 1, first of conjugate complex pair (wr,wi), + // -1, second of conjugate complex pair (wr,wi). + ip int + iscomplex [nbmax]int // Stores ip for each column in current block. + ) + + if side == lapack.LeftEV { + goto leftev + } + + // Compute right eigenvectors. + + // For complex right vector, iv-1 is for real part and iv for complex + // part. Non-blocked version always uses iv=1, blocked version starts + // with iv=nb-1 and goes down to 0 or 1. + iv = max(2, nb) - 1 + ip = 0 + is = m - 1 + for ki := n - 1; ki >= 0; ki-- { + if ip == -1 { + // Previous iteration (ki+1) was second of + // conjugate pair, so this ki is first of + // conjugate pair. 
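+ // Both columns of the pair were handled together in the previous + // iteration, so only the bookkeeping flag changes here.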
+ ip = 1 + continue + } + + if ki == 0 || t[ki*ldt+ki-1] == 0 { + // Last column or zero on sub-diagonal, so this + // ki must be real eigenvalue. + ip = 0 + } else { + // Non-zero on sub-diagonal, so this ki is + // second of conjugate pair. + ip = -1 + } + + if howmny == lapack.SelectedEV { + if ip == 0 { + if !selected[ki] { + continue + } + } else if !selected[ki-1] { + continue + } + } + + // Compute the ki-th eigenvalue (wr,wi). + wr := t[ki*ldt+ki] + var wi float64 + if ip != 0 { + wi = math.Sqrt(math.Abs(t[ki*ldt+ki-1])) * math.Sqrt(math.Abs(t[(ki-1)*ldt+ki])) + } + smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) + + if ip == 0 { + // Real right eigenvector. + + b[ki*ldb+iv] = 1 + // Form right-hand side. + for k := 0; k < ki; k++ { + b[k*ldb+iv] = -t[k*ldt+ki] + } + // Solve upper quasi-triangular system: + // [ T[0:ki,0:ki] - wr ]*X = scale*b. + for j := ki - 1; j >= 0; { + if j == 0 || t[j*ldt+j-1] == 0 { + // 1×1 diagonal block. + scale, xnorm, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) + // Scale X[0,0] to avoid overflow when updating the + // right-hand side. + if xnorm > 1 && norms[j] > bignum/xnorm { + x[0] /= xnorm + scale /= xnorm + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[j*ldb+iv] = x[0] + // Update right-hand side. + bi.Daxpy(j, -x[0], t[j:], ldt, b[iv:], ldb) + j-- + } else { + // 2×2 diagonal block. + scale, xnorm, _ := impl.Dlaln2(false, 2, 1, smin, 1, t[(j-1)*ldt+j-1:], ldt, + 1, 1, b[(j-1)*ldb+iv:], ldb, wr, 0, x[:3], 2) + // Scale X[0,0] and X[1,0] to avoid overflow + // when updating the right-hand side. + if xnorm > 1 { + beta := math.Max(norms[j-1], norms[j]) + if beta > bignum/xnorm { + x[0] /= xnorm + x[2] /= xnorm + scale /= xnorm + } + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[(j-1)*ldb+iv] = x[0] + b[j*ldb+iv] = x[2] + // Update right-hand side. + bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv:], ldb) + bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv:], ldb) + j -= 2 + } + } + // Copy the vector x or Q*x to VR and normalize. + switch { + case howmny != lapack.AllEVMulQ: + // No back-transform: copy x to VR and normalize. + bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) + ii := bi.Idamax(ki+1, vr[is:], ldvr) + remax := 1 / math.Abs(vr[ii*ldvr+is]) + bi.Dscal(ki+1, remax, vr[is:], ldvr) + for k := ki + 1; k < n; k++ { + vr[k*ldvr+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. + if ki > 0 { + bi.Dgemv(blas.NoTrans, n, ki, 1, vr, ldvr, b[iv:], ldb, + b[ki*ldb+iv], vr[ki:], ldvr) + } + ii := bi.Idamax(n, vr[ki:], ldvr) + remax := 1 / math.Abs(vr[ii*ldvr+ki]) + bi.Dscal(n, remax, vr[ki:], ldvr) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out below vector. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv] = 0 + } + iscomplex[iv] = ip + // Back-transform and normalization is done below. + } + } else { + // Complex right eigenvector. + + // Initial solve + // [ ( T[ki-1,ki-1] T[ki-1,ki] ) - (wr + i*wi) ]*X = 0. + // [ ( T[ki, ki-1] T[ki, ki] ) ] + if math.Abs(t[(ki-1)*ldt+ki]) >= math.Abs(t[ki*ldt+ki-1]) { + b[(ki-1)*ldb+iv-1] = 1 + b[ki*ldb+iv] = wi / t[(ki-1)*ldt+ki] + } else { + b[(ki-1)*ldb+iv-1] = -wi / t[ki*ldt+ki-1] + b[ki*ldb+iv] = 1 + } + b[ki*ldb+iv-1] = 0 + b[(ki-1)*ldb+iv] = 0 + // Form right-hand side. 
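+ // Columns iv-1 and iv of b accumulate the real and imaginary parts + // of the eigenvector being computed.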
+ for k := 0; k < ki-1; k++ { + b[k*ldb+iv-1] = -b[(ki-1)*ldb+iv-1] * t[k*ldt+ki-1] + b[k*ldb+iv] = -b[ki*ldb+iv] * t[k*ldt+ki] + } + // Solve upper quasi-triangular system: + // [ T[0:ki-1,0:ki-1] - (wr+i*wi) ]*X = scale*(b1+i*b2) + for j := ki - 2; j >= 0; { + if j == 0 || t[j*ldt+j-1] == 0 { + // 1×1 diagonal block. + + scale, xnorm, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv-1:], ldb, wr, wi, x[:2], 2) + // Scale X[0,0] and X[0,1] to avoid + // overflow when updating the right-hand side. + if xnorm > 1 && norms[j] > bignum/xnorm { + x[0] /= xnorm + x[1] /= xnorm + scale /= xnorm + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv-1:], ldb) + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[j*ldb+iv-1] = x[0] + b[j*ldb+iv] = x[1] + // Update the right-hand side. + bi.Daxpy(j, -x[0], t[j:], ldt, b[iv-1:], ldb) + bi.Daxpy(j, -x[1], t[j:], ldt, b[iv:], ldb) + j-- + } else { + // 2×2 diagonal block. + + scale, xnorm, _ := impl.Dlaln2(false, 2, 2, smin, 1, t[(j-1)*ldt+j-1:], ldt, + 1, 1, b[(j-1)*ldb+iv-1:], ldb, wr, wi, x[:], 2) + // Scale X to avoid overflow when updating + // the right-hand side. + if xnorm > 1 { + beta := math.Max(norms[j-1], norms[j]) + if beta > bignum/xnorm { + rec := 1 / xnorm + x[0] *= rec + x[1] *= rec + x[2] *= rec + x[3] *= rec + scale *= rec + } + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv-1:], ldb) + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[(j-1)*ldb+iv-1] = x[0] + b[(j-1)*ldb+iv] = x[1] + b[j*ldb+iv-1] = x[2] + b[j*ldb+iv] = x[3] + // Update the right-hand side. + bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv-1:], ldb) + bi.Daxpy(j-1, -x[1], t[j-1:], ldt, b[iv:], ldb) + bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv-1:], ldb) + bi.Daxpy(j-1, -x[3], t[j:], ldt, b[iv:], ldb) + j -= 2 + } + } + + // Copy the vector x or Q*x to VR and normalize. + switch { + case howmny != lapack.AllEVMulQ: + // No back-transform: copy x to VR and normalize. + bi.Dcopy(ki+1, b[iv-1:], ldb, vr[is-1:], ldvr) + bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) + emax := 0.0 + for k := 0; k <= ki; k++ { + emax = math.Max(emax, math.Abs(vr[k*ldvr+is-1])+math.Abs(vr[k*ldvr+is])) + } + remax := 1 / emax + bi.Dscal(ki+1, remax, vr[is-1:], ldvr) + bi.Dscal(ki+1, remax, vr[is:], ldvr) + for k := ki + 1; k < n; k++ { + vr[k*ldvr+is-1] = 0 + vr[k*ldvr+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. + if ki-1 > 0 { + bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv-1:], ldb, + b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) + bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv:], ldb, + b[ki*ldb+iv], vr[ki:], ldvr) + } else { + bi.Dscal(n, b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) + bi.Dscal(n, b[ki*ldb+iv], vr[ki:], ldvr) + } + emax := 0.0 + for k := 0; k < n; k++ { + emax = math.Max(emax, math.Abs(vr[k*ldvr+ki-1])+math.Abs(vr[k*ldvr+ki])) + } + remax := 1 / emax + bi.Dscal(n, remax, vr[ki-1:], ldvr) + bi.Dscal(n, remax, vr[ki:], ldvr) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out below vector. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv-1] = 0 + b[k*ldb+iv] = 0 + } + iscomplex[iv-1] = -ip + iscomplex[iv] = ip + iv-- + // Back-transform and normalization is done below. + } + } + if nb > 1 { + // Blocked version of back-transform. + + // For complex case, ki2 includes both vectors (ki-1 and ki). + ki2 := ki + if ip != 0 { + ki2-- + } + // Columns iv:nb of b are valid vectors. 
+ // When the number of vectors stored reaches nb-1 or nb, + // or if this was the last vector, do the GEMM. + if iv < 2 || ki2 == 0 { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, nb-iv, ki2+nb-iv, + 1, vr, ldvr, b[iv:], ldb, + 0, b[nb+iv:], ldb) + // Normalize vectors. + var remax float64 + for k := iv; k < nb; k++ { + if iscomplex[k] == 0 { + // Real eigenvector. + ii := bi.Idamax(n, b[nb+k:], ldb) + remax = 1 / math.Abs(b[ii*ldb+nb+k]) + } else if iscomplex[k] == 1 { + // First eigenvector of conjugate pair. + emax := 0.0 + for ii := 0; ii < n; ii++ { + emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) + } + remax = 1 / emax + // Second eigenvector of conjugate pair + // will reuse this value of remax. + } + bi.Dscal(n, remax, b[nb+k:], ldb) + } + impl.Dlacpy(blas.All, n, nb-iv, b[nb+iv:], ldb, vr[ki2:], ldvr) + iv = nb - 1 + } else { + iv-- + } + } + is-- + if ip != 0 { + is-- + } + } + + if side == lapack.RightEV { + return m + } + +leftev: + // Compute left eigenvectors. + + // For a complex left vector, iv is for the real part and iv+1 for the + // imaginary part. Non-blocked version always uses iv=0. Blocked version + // starts with iv=0, goes up to nb-2 or nb-1. + iv = 0 + ip = 0 + is = 0 + for ki := 0; ki < n; ki++ { + if ip == 1 { + // Previous iteration ki-1 was first of conjugate pair, + // so this ki is second of conjugate pair. + ip = -1 + continue + } + + if ki == n-1 || t[(ki+1)*ldt+ki] == 0 { + // Last column or zero on sub-diagonal, so this ki must + // be real eigenvalue. + ip = 0 + } else { + // Non-zero on sub-diagonal, so this ki is first of + // conjugate pair. + ip = 1 + } + if howmny == lapack.SelectedEV && !selected[ki] { + continue + } + + // Compute the ki-th eigenvalue (wr,wi). + wr := t[ki*ldt+ki] + var wi float64 + if ip != 0 { + wi = math.Sqrt(math.Abs(t[ki*ldt+ki+1])) * math.Sqrt(math.Abs(t[(ki+1)*ldt+ki])) + } + smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) + + if ip == 0 { + // Real left eigenvector. + + b[ki*ldb+iv] = 1 + // Form right-hand side. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv] = -t[ki*ldt+k] + } + // Solve transposed quasi-triangular system: + // [ T[ki+1:n,ki+1:n] - wr ]^T * X = scale*b + vmax := 1.0 + vcrit := bignum + for j := ki + 1; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // 1×1 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side. + if norms[j] > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + vmax = 1 + vcrit = bignum + } + b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) + // Solve [ T[j,j] - wr ]^T * X = b. + scale, _, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + } + b[j*ldb+iv] = x[0] + vmax = math.Max(math.Abs(b[j*ldb+iv]), vmax) + vcrit = bignum / vmax + j++ + } else { + // 2×2 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side.
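+ // Here vcrit = bignum/vmax; once a column norm of T exceeds it, + // the updates below could overflow, so the accumulated right-hand + // side is first rescaled by 1/vmax.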
+ beta := math.Max(norms[j], norms[j+1]) + if beta > vcrit { + bi.Dscal(n-ki, 1/vmax, b[ki*ldb+iv:], ldb) + vmax = 1 + vcrit = bignum + } + b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) + b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j+1:], ldt, b[(ki+1)*ldb+iv:], ldb) + // Solve + // [ T[j,j]-wr T[j,j+1] ]^T * X = scale*[ b1 ] + // [ T[j+1,j] T[j+1,j+1]-wr ] [ b2 ] + scale, _, _ := impl.Dlaln2(true, 2, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:3], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + } + b[j*ldb+iv] = x[0] + b[(j+1)*ldb+iv] = x[2] + vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[(j+1)*ldb+iv]))) + vcrit = bignum / vmax + j += 2 + } + } + // Copy the vector x or Q*x to VL and normalize. + switch { + case howmny != lapack.AllEVMulQ: + // No back-transform: copy x to VL and normalize. + bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) + ii := bi.Idamax(n-ki, vl[ki*ldvl+is:], ldvl) + ki + remax := 1 / math.Abs(vl[ii*ldvl+is]) + bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) + for k := 0; k < ki; k++ { + vl[k*ldvl+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. + if n-ki-1 > 0 { + bi.Dgemv(blas.NoTrans, n, n-ki-1, + 1, vl[ki+1:], ldvl, b[(ki+1)*ldb+iv:], ldb, + b[ki*ldb+iv], vl[ki:], ldvl) + } + ii := bi.Idamax(n, vl[ki:], ldvl) + remax := 1 / math.Abs(vl[ii*ldvl+ki]) + bi.Dscal(n, remax, vl[ki:], ldvl) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out above vector. + for k := 0; k < ki; k++ { + b[k*ldb+iv] = 0 + } + iscomplex[iv] = ip + // Back-transform and normalization is done below. + } + } else { + // Complex left eigenvector. + + // Initial solve: + // [ [ T[ki,ki] T[ki,ki+1] ]^T - (wr - i*wi) ]*X = 0. + // [ [ T[ki+1,ki] T[ki+1,ki+1] ] ] + if math.Abs(t[ki*ldt+ki+1]) >= math.Abs(t[(ki+1)*ldt+ki]) { + b[ki*ldb+iv] = wi / t[ki*ldt+ki+1] + b[(ki+1)*ldb+iv+1] = 1 + } else { + b[ki*ldb+iv] = 1 + b[(ki+1)*ldb+iv+1] = -wi / t[(ki+1)*ldt+ki] + } + b[(ki+1)*ldb+iv] = 0 + b[ki*ldb+iv+1] = 0 + // Form right-hand side. + for k := ki + 2; k < n; k++ { + b[k*ldb+iv] = -b[ki*ldb+iv] * t[ki*ldt+k] + b[k*ldb+iv+1] = -b[(ki+1)*ldb+iv+1] * t[(ki+1)*ldt+k] + } + // Solve transposed quasi-triangular system: + // [ T[ki+2:n,ki+2:n]^T - (wr-i*wi) ]*X = b1+i*b2 + vmax := 1.0 + vcrit := bignum + for j := ki + 2; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // 1×1 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side elements. + if norms[j] > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) + vmax = 1 + vcrit = bignum + } + b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + // Solve [ T[j,j]-(wr-i*wi) ]*(X11+i*X12) = b1+i*b2. + scale, _, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:2], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) + } + b[j*ldb+iv] = x[0] + b[j*ldb+iv+1] = x[1] + vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[j*ldb+iv+1]))) + vcrit = bignum / vmax + j++ + } else { + // 2×2 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side elements.
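+ // The real (iv) and imaginary (iv+1) columns are always rescaled + // by the same factor so that together they keep representing a + // single complex vector.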
+ if math.Max(norms[j], norms[j+1]) > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) + vmax = 1 + vcrit = bignum + } + b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[(j+1)*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + // Solve 2×2 complex linear equation + // [ [T[j,j] T[j,j+1] ]^T - (wr-i*wi)*I ]*X = scale*b + // [ [T[j+1,j] T[j+1,j+1]] ] + scale, _, _ := impl.Dlaln2(true, 2, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) + } + b[j*ldb+iv] = x[0] + b[j*ldb+iv+1] = x[1] + b[(j+1)*ldb+iv] = x[2] + b[(j+1)*ldb+iv+1] = x[3] + vmax01 := math.Max(math.Abs(x[0]), math.Abs(x[1])) + vmax23 := math.Max(math.Abs(x[2]), math.Abs(x[3])) + vmax = math.Max(vmax, math.Max(vmax01, vmax23)) + vcrit = bignum / vmax + j += 2 + } + } + // Copy the vector x or Q*x to VL and normalize. + switch { + case howmny != lapack.AllEVMulQ: + // No back-transform: copy x to VL and normalize. + bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) + bi.Dcopy(n-ki, b[ki*ldb+iv+1:], ldb, vl[ki*ldvl+is+1:], ldvl) + emax := 0.0 + for k := ki; k < n; k++ { + emax = math.Max(emax, math.Abs(vl[k*ldvl+is])+math.Abs(vl[k*ldvl+is+1])) + } + remax := 1 / emax + bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) + bi.Dscal(n-ki, remax, vl[ki*ldvl+is+1:], ldvl) + for k := 0; k < ki; k++ { + vl[k*ldvl+is] = 0 + vl[k*ldvl+is+1] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. + if n-ki-2 > 0 { + bi.Dgemv(blas.NoTrans, n, n-ki-2, + 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv:], ldb, + b[ki*ldb+iv], vl[ki:], ldvl) + bi.Dgemv(blas.NoTrans, n, n-ki-2, + 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv+1:], ldb, + b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) + } else { + bi.Dscal(n, b[ki*ldb+iv], vl[ki:], ldvl) + bi.Dscal(n, b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) + } + emax := 0.0 + for k := 0; k < n; k++ { + emax = math.Max(emax, math.Abs(vl[k*ldvl+ki])+math.Abs(vl[k*ldvl+ki+1])) + } + remax := 1 / emax + bi.Dscal(n, remax, vl[ki:], ldvl) + bi.Dscal(n, remax, vl[ki+1:], ldvl) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out above vector. + // Could go from ki-nb+1 to ki-1. + for k := 0; k < ki; k++ { + b[k*ldb+iv] = 0 + b[k*ldb+iv+1] = 0 + } + iscomplex[iv] = ip + iscomplex[iv+1] = -ip + iv++ + // Back-transform and normalization is done below. + } + } + if nb > 1 { + // Blocked version of back-transform. + // For complex case, ki2 includes both vectors ki and ki+1. + ki2 := ki + if ip != 0 { + ki2++ + } + // Columns 0..iv of b are valid vectors. When the + // number of vectors stored reaches nb-1 or nb, or if + // this was the last vector, do the GEMM. + if iv >= nb-2 || ki2 == n-1 { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, iv+1, n-ki2+iv, + 1, vl[ki2-iv:], ldvl, b[(ki2-iv)*ldb:], ldb, + 0, b[nb:], ldb) + // Normalize vectors. + var remax float64 + for k := 0; k <= iv; k++ { + if iscomplex[k] == 0 { + // Real eigenvector. + ii := bi.Idamax(n, b[nb+k:], ldb) + remax = 1 / math.Abs(b[ii*ldb+nb+k]) + } else if iscomplex[k] == 1 { + // First eigenvector of conjugate pair.
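+ // The norm of a complex eigenvector is taken as the maximum + // over all rows of |re|+|im|, the same normalization used by + // the unblocked paths above.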
+ emax := 0.0 + for ii := 0; ii < n; ii++ { + emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) + } + remax = 1 / emax + // Second eigenvector of conjugate pair + // will reuse this value of remax. + } + bi.Dscal(n, remax, b[nb+k:], ldb) + } + impl.Dlacpy(blas.All, n, iv+1, b[nb:], ldb, vl[ki2-iv:], ldvl) + iv = 0 + } else { + iv++ + } + } + is++ + if ip != 0 { + is++ + } + } + + return m +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go new file mode 100644 index 00000000..40e03785 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go @@ -0,0 +1,221 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/lapack" + +// Dtrexc reorders the real Schur factorization of a n×n real matrix +// A = Q*T*Q^T +// so that the diagonal block of T with row index ifst is moved to row ilst. +// +// On entry, T must be in Schur canonical form, that is, block upper triangular +// with 1×1 and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal +// elements equal and its off-diagonal elements of opposite sign. +// +// On return, T will be reordered by an orthogonal similarity transformation Z +// as Z^T*T*Z, and will be again in Schur canonical form. +// +// If compq is lapack.UpdateSchur, on return the matrix Q of Schur vectors will be +// updated by postmultiplying it with Z. +// If compq is lapack.None, the matrix Q is not referenced and will not be +// updated. +// For other values of compq Dtrexc will panic. +// +// ifst and ilst specify the reordering of the diagonal blocks of T. The block +// with row index ifst is moved to row ilst, by a sequence of transpositions +// between adjacent blocks. +// +// If ifst points to the second row of a 2×2 block, ifstOut will point to the +// first row, otherwise it will be equal to ifst. +// +// ilstOut will point to the first row of the block in its final position. If ok +// is true, ilstOut may differ from ilst by +1 or -1. +// +// It must hold that +// 0 <= ifst < n, and 0 <= ilst < n, +// otherwise Dtrexc will panic. +// +// If ok is false, two adjacent blocks were too close to swap because the +// problem is very ill-conditioned. T may have been partially reordered, and +// ilstOut will point to the first row of the block at the position to which it +// has been moved. +// +// work must have length at least n, otherwise Dtrexc will panic. +// +// Dtrexc is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dtrexc(compq lapack.EVComp, n int, t []float64, ldt int, q []float64, ldq int, ifst, ilst int, work []float64) (ifstOut, ilstOut int, ok bool) { + checkMatrix(n, n, t, ldt) + var wantq bool + switch compq { + default: + panic("lapack: bad value of compq") + case lapack.None: + // Nothing to do because wantq is already false. + case lapack.UpdateSchur: + wantq = true + checkMatrix(n, n, q, ldq) + } + if (ifst < 0 || n <= ifst) && n > 0 { + panic("lapack: ifst out of range") + } + if (ilst < 0 || n <= ilst) && n > 0 { + panic("lapack: ilst out of range") + } + if len(work) < n { + panic(badWork) + } + + ok = true + + // Quick return if possible. 
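+ // A matrix of order n <= 1 has no blocks to reorder, so ifst and ilst + // are returned unchanged.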
+ if n <= 1 { + return ifst, ilst, true + } + + // Determine the first row of the specified block + // and find out whether it is 1×1 or 2×2. + if ifst > 0 && t[ifst*ldt+ifst-1] != 0 { + ifst-- + } + nbf := 1 // Size of the first block. + if ifst+1 < n && t[(ifst+1)*ldt+ifst] != 0 { + nbf = 2 + } + // Determine the first row of the final block + // and find out whether it is 1×1 or 2×2. + if ilst > 0 && t[ilst*ldt+ilst-1] != 0 { + ilst-- + } + nbl := 1 // Size of the last block. + if ilst+1 < n && t[(ilst+1)*ldt+ilst] != 0 { + nbl = 2 + } + + switch { + case ifst == ilst: + return ifst, ilst, true + + case ifst < ilst: + // Update ilst. + switch { + case nbf == 2 && nbl == 1: + ilst-- + case nbf == 1 && nbl == 2: + ilst++ + } + here := ifst + for here < ilst { + // Swap block with next one below. + if nbf == 1 || nbf == 2 { + // Current block either 1×1 or 2×2. + nbnext := 1 // Size of the next block. + if here+nbf+1 < n && t[(here+nbf+1)*ldt+here+nbf] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbf, nbnext, work) + if !ok { + return ifst, here, false + } + here += nbnext + // Test if 2×2 block breaks into two 1×1 blocks. + if nbf == 2 && t[(here+1)*ldt+here] == 0 { + nbf = 3 + } + continue + } + + // Current block consists of two 1×1 blocks each of + // which must be swapped individually. + nbnext := 1 // Size of the next block. + if here+3 < n && t[(here+3)*ldt+here+2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, nbnext, work) + if !ok { + return ifst, here, false + } + if nbnext == 1 { + // Swap two 1×1 blocks, no problems possible. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) + here++ + continue + } + // Recompute nbnext in case 2×2 split. + if t[(here+2)*ldt+here+1] == 0 { + nbnext = 1 + } + if nbnext == 2 { + // 2×2 block did not split. + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) + if !ok { + return ifst, here, false + } + } else { + // 2×2 block did split. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, 1, work) + } + here += 2 + } + return ifst, here, true + + default: // ifst > ilst + here := ifst + for here > ilst { + // Swap block with next one above. + if nbf == 1 || nbf == 2 { + // Current block either 1×1 or 2×2. + nbnext := 1 + if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, nbf, work) + if !ok { + return ifst, here, false + } + here -= nbnext + // Test if 2×2 block breaks into two 1×1 blocks. + if nbf == 2 && t[(here+1)*ldt+here] == 0 { + nbf = 3 + } + continue + } + + // Current block consists of two 1×1 blocks each of + // which must be swapped individually. + nbnext := 1 + if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, 1, work) + if !ok { + return ifst, here, false + } + if nbnext == 1 { + // Swap two 1×1 blocks, no problems possible. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbnext, 1, work) + here-- + continue + } + // Recompute nbnext in case 2×2 split. + if t[here*ldt+here-1] == 0 { + nbnext = 1 + } + if nbnext == 2 { + // 2×2 block did not split. + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 2, 1, work) + if !ok { + return ifst, here, false + } + } else { + // 2×2 block did split.
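+ // Exchanging two adjacent 1×1 blocks cannot fail, so the + // returned ok of these two swaps is ignored.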
+ impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 1, 1, work) + } + here -= 2 + } + return ifst, here, true + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go new file mode 100644 index 00000000..a43efe6f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go @@ -0,0 +1,57 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrti2 computes the inverse of a triangular matrix, storing the result in place +// into a. This is the BLAS level 2 version of the algorithm. +// +// Dtrti2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dtrti2(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) { + checkMatrix(n, n, a, lda) + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if diag != blas.NonUnit && diag != blas.Unit { + panic(badDiag) + } + bi := blas64.Implementation() + + nonUnit := diag == blas.NonUnit + // TODO(btracey): Replace this with a row-major ordering. + if uplo == blas.Upper { + for j := 0; j < n; j++ { + var ajj float64 + if nonUnit { + ajj = 1 / a[j*lda+j] + a[j*lda+j] = ajj + ajj *= -1 + } else { + ajj = -1 + } + bi.Dtrmv(blas.Upper, blas.NoTrans, diag, j, a, lda, a[j:], lda) + bi.Dscal(j, ajj, a[j:], lda) + } + return + } + for j := n - 1; j >= 0; j-- { + var ajj float64 + if nonUnit { + ajj = 1 / a[j*lda+j] + a[j*lda+j] = ajj + ajj *= -1 + } else { + ajj = -1 + } + if j < n-1 { + bi.Dtrmv(blas.Lower, blas.NoTrans, diag, n-j-1, a[(j+1)*lda+j+1:], lda, a[(j+1)*lda+j:], lda) + bi.Dscal(n-j-1, ajj, a[(j+1)*lda+j:], lda) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go new file mode 100644 index 00000000..95f1b3be --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go @@ -0,0 +1,64 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrtri computes the inverse of a triangular matrix, storing the result in place +// into a. This is the BLAS level 3 version of the algorithm which builds upon +// Dtrti2 to operate on matrix blocks instead of only individual columns. +// +// Dtrtri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. 
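+// +// As an illustrative sketch (not part of the original source), a 2×2 +// non-unit upper triangular matrix can be inverted in place with +// a := []float64{ +// 2, 1, +// 0, 4, +// } +// ok := impl.Dtrtri(blas.Upper, blas.NonUnit, 2, a, 2) +// after which ok is true and a holds {0.5, -0.125, 0, 0.25}.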
+func (impl Implementation) Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool) { + checkMatrix(n, n, a, lda) + if uplo != blas.Upper && uplo != blas.Lower { + panic(badUplo) + } + if diag != blas.NonUnit && diag != blas.Unit { + panic(badDiag) + } + if n == 0 { + return false + } + nonUnit := diag == blas.NonUnit + if nonUnit { + for i := 0; i < n; i++ { + if a[i*lda+i] == 0 { + return false + } + } + } + + bi := blas64.Implementation() + + nb := impl.Ilaenv(1, "DTRTRI", "UD", n, -1, -1, -1) + if nb <= 1 || nb > n { + impl.Dtrti2(uplo, diag, n, a, lda) + return true + } + if uplo == blas.Upper { + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dtrmm(blas.Left, blas.Upper, blas.NoTrans, diag, j, jb, 1, a, lda, a[j:], lda) + bi.Dtrsm(blas.Right, blas.Upper, blas.NoTrans, diag, j, jb, -1, a[j*lda+j:], lda, a[j:], lda) + impl.Dtrti2(blas.Upper, diag, jb, a[j*lda+j:], lda) + } + return true + } + nn := ((n - 1) / nb) * nb + for j := nn; j >= 0; j -= nb { + jb := min(nb, n-j) + if j+jb <= n-1 { + bi.Dtrmm(blas.Left, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, 1, a[(j+jb)*lda+j+jb:], lda, a[(j+jb)*lda+j:], lda) + bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, -1, a[j*lda+j:], lda, a[(j+jb)*lda+j:], lda) + } + impl.Dtrti2(blas.Lower, diag, jb, a[j*lda+j:], lda) + } + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go new file mode 100644 index 00000000..e1782d23 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go @@ -0,0 +1,30 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrtrs solves a triangular system of the form A * X = B or A^T * X = B. Dtrtrs +// returns whether the solve completed successfully. If A is singular, no solve is performed. +func (impl Implementation) Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) { + nounit := diag == blas.NonUnit + if n == 0 { + return false + } + // Check for singularity. + if nounit { + for i := 0; i < n; i++ { + if a[i*lda+i] == 0 { + return false + } + } + } + bi := blas64.Implementation() + bi.Dtrsm(blas.Left, uplo, trans, diag, n, nrhs, 1, a, lda, b, ldb) + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/general.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/general.go new file mode 100644 index 00000000..a82d9969 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/general.go @@ -0,0 +1,143 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/lapack" +) + +// Implementation is the native Go implementation of LAPACK routines. It +// is built on top of calls to the return of blas64.Implementation(), so while +// this code is in pure Go, the underlying BLAS implementation may not be. +type Implementation struct{} + +var _ lapack.Float64 = Implementation{} + +// This list is duplicated in lapack/cgo. Keep in sync. 
+const ( + absIncNotOne = "lapack: increment not one or negative one" + badAlpha = "lapack: bad alpha length" + badAuxv = "lapack: auxv has insufficient length" + badBeta = "lapack: bad beta length" + badD = "lapack: d has insufficient length" + badDecompUpdate = "lapack: bad decomp update" + badDiag = "lapack: bad diag" + badDims = "lapack: bad input dimensions" + badDirect = "lapack: bad direct" + badE = "lapack: e has insufficient length" + badEVComp = "lapack: bad EVComp" + badEVJob = "lapack: bad EVJob" + badEVSide = "lapack: bad EVSide" + badGSVDJob = "lapack: bad GSVDJob" + badHowMany = "lapack: bad HowMany" + badIlo = "lapack: ilo out of range" + badIhi = "lapack: ihi out of range" + badIpiv = "lapack: bad permutation length" + badJob = "lapack: bad Job" + badK1 = "lapack: k1 out of range" + badK2 = "lapack: k2 out of range" + badKperm = "lapack: incorrect permutation length" + badLdA = "lapack: index of a out of range" + badNb = "lapack: nb out of range" + badNorm = "lapack: bad norm" + badPivot = "lapack: bad pivot" + badS = "lapack: s has insufficient length" + badShifts = "lapack: bad shifts" + badSide = "lapack: bad side" + badSlice = "lapack: bad input slice length" + badSort = "lapack: bad Sort" + badStore = "lapack: bad store" + badTau = "lapack: tau has insufficient length" + badTauQ = "lapack: tauQ has insufficient length" + badTauP = "lapack: tauP has insufficient length" + badTrans = "lapack: bad trans" + badVn1 = "lapack: vn1 has insufficient length" + badVn2 = "lapack: vn2 has insufficient length" + badUplo = "lapack: illegal triangle" + badWork = "lapack: insufficient working memory" + badZ = "lapack: insufficient z length" + kGTM = "lapack: k > m" + kGTN = "lapack: k > n" + kLT0 = "lapack: k < 0" + mLTN = "lapack: m < n" + nanScale = "lapack: NaN scale factor" + negDimension = "lapack: negative matrix dimension" + negZ = "lapack: negative z value" + nLT0 = "lapack: n < 0" + nLTM = "lapack: n < m" + offsetGTM = "lapack: offset > m" + shortWork = "lapack: working array shorter than declared" + zeroDiv = "lapack: zero divisor" +) + +// checkMatrix verifies the parameters of a matrix input. +func checkMatrix(m, n int, a []float64, lda int) { + if m < 0 { + panic("lapack: has negative number of rows") + } + if n < 0 { + panic("lapack: has negative number of columns") + } + if lda < n { + panic("lapack: stride less than number of columns") + } + if len(a) < (m-1)*lda+n { + panic("lapack: insufficient matrix slice length") + } +} + +func checkVector(n int, v []float64, inc int) { + if n < 0 { + panic("lapack: negative vector length") + } + if (inc > 0 && (n-1)*inc >= len(v)) || (inc < 0 && (1-n)*inc >= len(v)) { + panic("lapack: insufficient vector slice length") + } +} + +func checkSymBanded(ab []float64, n, kd, lda int) { + if n < 0 { + panic("lapack: negative banded length") + } + if kd < 0 { + panic("lapack: negative bandwidth value") + } + if lda < kd+1 { + panic("lapack: stride less than number of bands") + } + if len(ab) < (n-1)*lda+kd { + panic("lapack: insufficient banded vector length") + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +const ( + // dlamchE is the machine epsilon. For IEEE this is 2^{-53}. + dlamchE = 1.0 / (1 << 53) + + // dlamchB is the radix of the machine (the base of the number system). + dlamchB = 2 + + // dlamchP is base * eps. 
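+ // In the reference LAPACK this quantity is dlamch('P'), the + // machine precision.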
+ dlamchP = dlamchB * dlamchE + + // dlamchS is the "safe minimum", that is, the lowest number such that + // 1/dlamchS does not overflow, or also the smallest normal number. + // For IEEE this is 2^{-1022}. + dlamchS = 1.0 / (1 << 256) / (1 << 256) / (1 << 256) / (1 << 254) +) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go new file mode 100644 index 00000000..bd0e4d8f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go @@ -0,0 +1,33 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Iladlc scans a matrix for its last non-zero column. Returns -1 if the matrix +// is all zeros. +// +// Iladlc is an internal routine. It is exported for testing purposes. +func (Implementation) Iladlc(m, n int, a []float64, lda int) int { + if n == 0 || m == 0 { + return n - 1 + } + checkMatrix(m, n, a, lda) + + // Test common case where corner is non-zero. + if a[n-1] != 0 || a[(m-1)*lda+(n-1)] != 0 { + return n - 1 + } + + // Scan each row tracking the highest column seen. + highest := -1 + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + if a[i*lda+j] != 0 { + highest = max(highest, j) + break + } + } + } + return highest +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go new file mode 100644 index 00000000..9f9e0d93 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go @@ -0,0 +1,30 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Iladlr scans a matrix for its last non-zero row. Returns -1 if the matrix +// is all zeros. +// +// Iladlr is an internal routine. It is exported for testing purposes. +func (Implementation) Iladlr(m, n int, a []float64, lda int) int { + if m == 0 { + return m - 1 + } + + checkMatrix(m, n, a, lda) + + // Check the common case where the corner is non-zero + if a[(m-1)*lda] != 0 || a[(m-1)*lda+n-1] != 0 { + return m - 1 + } + for i := m - 1; i >= 0; i-- { + for j := 0; j < n; j++ { + if a[i*lda+j] != 0 { + return i + } + } + } + return -1 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go new file mode 100644 index 00000000..7f08ba60 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go @@ -0,0 +1,387 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Ilaenv returns algorithm tuning parameters for the algorithm given by the +// input string. ispec specifies the parameter to return: +// 1: The optimal block size for a blocked algorithm. +// 2: The minimum block size for a blocked algorithm. +// 3: The block size of unprocessed data at which a blocked algorithm should +// crossover to an unblocked version. +// 4: The number of shifts. +// 5: The minimum column dimension for blocking to be used. +// 6: The crossover point for SVD (to use QR factorization or not). 
+// 7: The number of processors. +// 8: The crossover point for multi-shift in QR and QZ methods for non-symmetric eigenvalue problems. +// 9: Maximum size of the subproblems in divide-and-conquer algorithms. +// 10: ieee NaN arithmetic can be trusted not to trap. +// 11: infinity arithmetic can be trusted not to trap. +// 12...16: parameters for Dhseqr and related functions. See Iparmq for more +// information. +// +// Ilaenv is an internal routine. It is exported for testing purposes. +func (impl Implementation) Ilaenv(ispec int, s string, opts string, n1, n2, n3, n4 int) int { + // TODO(btracey): Replace this with a constant lookup? A list of constants? + sname := s[0] == 'S' || s[0] == 'D' + cname := s[0] == 'C' || s[0] == 'Z' + if !sname && !cname { + panic("lapack: bad name") + } + c2 := s[1:3] + c3 := s[3:6] + c4 := c3[1:3] + + switch ispec { + default: + panic("lapack: bad ispec") + case 1: + switch c2 { + default: + panic("lapack: bad function name") + case "GE": + switch c3 { + default: + panic("lapack: bad function name") + case "TRF": + if sname { + return 64 + } + return 64 + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 32 + } + return 32 + case "HRD": + if sname { + return 32 + } + return 32 + case "BRD": + if sname { + return 32 + } + return 32 + case "TRI": + if sname { + return 64 + } + return 64 + } + case "PO": + switch c3 { + default: + panic("lapack: bad function name") + case "TRF": + if sname { + return 64 + } + return 64 + } + case "SY": + switch c3 { + default: + panic("lapack: bad function name") + case "TRF": + if sname { + return 64 + } + return 64 + case "TRD": + return 32 + case "GST": + return 64 + } + case "HE": + switch c3 { + default: + panic("lapack: bad function name") + case "TRF": + return 64 + case "TRD": + return 32 + case "GST": + return 64 + } + case "OR": + switch c3[0] { + default: + panic("lapack: bad function name") + case 'G': + switch c3[1:] { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + case 'M': + switch c3[1:] { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + } + case "UN": + switch c3[0] { + default: + panic("lapack: bad function name") + case 'G': + switch c3[1:] { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + case 'M': + switch c3[1:] { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + } + case "GB": + switch c3 { + default: + panic("lapack: bad function name") + case "TRF": + if sname { + if n4 <= 64 { + return 1 + } + return 32 + } + if n4 <= 64 { + return 1 + } + return 32 + } + case "PB": + switch c3 { + default: + panic("lapack: bad function name") + case "TRF": + if sname { + if n4 <= 64 { + return 1 + } + return 32 + } + if n4 <= 64 { + return 1 + } + return 32 + } + case "TR": + switch c3 { + default: + panic("lapack: bad function name") + case "TRI": + if sname { + return 64 + } + return 64 + case "EVC": + if sname { + return 64 + } + return 64 + } + case "LA": + switch c3 { + default: + panic("lapack: bad function name") + case "UUM": + if sname { + return 64 + } + return 64 + } + case "ST": + if sname && c3 == "EBZ" { + return 1 + } + panic("lapack: bad function name") + } + case 2: + switch c2 { + default: + panic("lapack: bad function name") + case "GE": + switch c3 { + default: + panic("lapack: bad function name") + case "QRF", "RQF", "LQF", 
"QLF": + if sname { + return 2 + } + return 2 + case "HRD": + if sname { + return 2 + } + return 2 + case "BRD": + if sname { + return 2 + } + return 2 + case "TRI": + if sname { + return 2 + } + return 2 + } + case "SY": + switch c3 { + default: + panic("lapack: bad function name") + case "TRF": + if sname { + return 8 + } + return 8 + case "TRD": + if sname { + return 2 + } + panic("lapack: bad function name") + } + case "HE": + if c3 == "TRD" { + return 2 + } + panic("lapack: bad function name") + case "OR": + if !sname { + panic("lapack: bad function name") + } + switch c3[0] { + default: + panic("lapack: bad function name") + case 'G': + switch c4 { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + case 'M': + switch c4 { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + } + case "UN": + switch c3[0] { + default: + panic("lapack: bad function name") + case 'G': + switch c4 { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + case 'M': + switch c4 { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + } + } + case 3: + switch c2 { + default: + panic("lapack: bad function name") + case "GE": + switch c3 { + default: + panic("lapack: bad function name") + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 128 + } + return 128 + case "HRD": + if sname { + return 128 + } + return 128 + case "BRD": + if sname { + return 128 + } + return 128 + } + case "SY": + if sname && c3 == "TRD" { + return 32 + } + panic("lapack: bad function name") + case "HE": + if c3 == "TRD" { + return 32 + } + panic("lapack: bad function name") + case "OR": + switch c3[0] { + default: + panic("lapack: bad function name") + case 'G': + switch c4 { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 128 + } + } + case "UN": + switch c3[0] { + default: + panic("lapack: bad function name") + case 'G': + switch c4 { + default: + panic("lapack: bad function name") + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 128 + } + } + } + case 4: + // Used by xHSEQR + return 6 + case 5: + // Not used + return 2 + case 6: + // Used by xGELSS and xGESVD + return int(float64(min(n1, n2)) * 1.6) + case 7: + // Not used + return 1 + case 8: + // Used by xHSEQR + return 50 + case 9: + // used by xGELSD and xGESDD + return 25 + case 10: + // Go guarantees ieee + return 1 + case 11: + // Go guarantees ieee + return 1 + case 12, 13, 14, 15, 16: + // Dhseqr and related functions for eigenvalue problems. + return impl.Iparmq(ispec, s, opts, n1, n2, n3, n4) + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go new file mode 100644 index 00000000..ea3d6dfb --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go @@ -0,0 +1,115 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Iparmq returns problem and machine dependent parameters useful for Dhseqr and +// related subroutines for eigenvalue problems. +// +// ispec specifies the parameter to return: +// 12: Crossover point between Dlahqr and Dlaqr0. 
Will be at least 11. +// 13: Deflation window size. +// 14: Nibble crossover point. Determines when to skip a multi-shift QR sweep. +// 15: Number of simultaneous shifts in a multishift QR iteration. +// 16: Select structured matrix multiply. +// For other values of ispec Iparmq will panic. +// +// name is the name of the calling function. name must be in uppercase but this +// is not checked. +// +// opts is not used and exists for future use. +// +// n is the order of the Hessenberg matrix H. +// +// ilo and ihi specify the block [ilo:ihi+1,ilo:ihi+1] that is being processed. +// +// lwork is the amount of workspace available. +// +// Except for ispec input parameters are not checked. +// +// Iparmq is an internal routine. It is exported for testing purposes. +func (Implementation) Iparmq(ispec int, name, opts string, n, ilo, ihi, lwork int) int { + nh := ihi - ilo + 1 + ns := 2 + // Cases must be ordered from largest nh down so that the first + // matching case wins. + switch { + case nh >= 6000: + ns = 256 + case nh >= 3000: + ns = 128 + case nh >= 590: + ns = 64 + case nh >= 150: + ns = max(10, nh/int(math.Log(float64(nh))/math.Ln2)) + case nh >= 60: + ns = 10 + case nh >= 30: + ns = 4 + } + ns = max(2, ns-(ns%2)) + + switch ispec { + default: + panic("lapack: bad ispec") + + case 12: + // Matrices of order smaller than nmin get sent to Dlahqr, the + // classic double shift algorithm. This must be at least 11. + const nmin = 75 + return nmin + + case 13: + const knwswp = 500 + if nh <= knwswp { + return ns + } + return 3 * ns / 2 + + case 14: + // Skip a computationally expensive multi-shift QR sweep with + // Dlaqr5 whenever aggressive early deflation finds at least + // nibble*(window size)/100 deflations. The default, small, + // value reflects the expectation that the cost of looking + // through the deflation window with Dlaqr3 will be + // substantially smaller. + const nibble = 14 + return nibble + + case 15: + return ns + + case 16: + if len(name) != 6 { + panic("lapack: bad name") + } + const ( + k22min = 14 + kacmin = 14 + ) + var acc22 int + switch { + case name[1:] == "GGHRD" || name[1:] == "GGHD3": + acc22 = 1 + if nh >= k22min { + acc22 = 2 + } + case name[3:] == "EXC": + if nh >= kacmin { + acc22 = 1 + } + if nh >= k22min { + acc22 = 2 + } + case name[1:] == "HSEQR" || name[1:5] == "LAQR": + if ns >= kacmin { + acc22 = 1 + } + if ns >= k22min { + acc22 = 2 + } + } + return acc22 + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack.go new file mode 100644 index 00000000..5029389c --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack.go @@ -0,0 +1,188 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lapack + +import "gonum.org/v1/gonum/blas" + +const None = 'N' + +type Job byte + +type Comp byte + +// Complex128 defines the public complex128 LAPACK API supported by gonum/lapack. +type Complex128 interface{} + +// Float64 defines the public float64 LAPACK API supported by gonum/lapack.
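+// It is implemented by Implementation in the gonum package; an alternative +// implementation can be installed for the lapack64 wrappers with lapack64.Use.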
+type Float64 interface { + Dgecon(norm MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 + Dgeev(jobvl LeftEVJob, jobvr RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int) + Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool + Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) + Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) + Dgesvd(jobU, jobVT SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool) + Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool) + Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool) + Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int) + Dggsvd3(jobU, jobV, jobQ GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool) + Dlantr(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64 + Dlange(norm MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 + Dlansy(norm MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64 + Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) + Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) + Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) + Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 + Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) + Dsyev(jobz EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool) + Dtrcon(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64 + Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool) + Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) +} + +// Direct specifies the direction of the multiplication for the Householder matrix. +type Direct byte + +const ( + Forward Direct = 'F' // Reflectors are right-multiplied, H_0 * H_1 * ... * H_{k-1}. + Backward Direct = 'B' // Reflectors are left-multiplied, H_{k-1} * ... * H_1 * H_0. +) + +// Sort is the sorting order. +type Sort byte + +const ( + SortIncreasing Sort = 'I' + SortDecreasing Sort = 'D' +) + +// StoreV indicates the storage direction of elementary reflectors. +type StoreV byte + +const ( + ColumnWise StoreV = 'C' // Reflector stored in a column of the matrix. + RowWise StoreV = 'R' // Reflector stored in a row of the matrix. +) + +// MatrixNorm represents the kind of matrix norm to compute. 
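+// The parenthesized bytes in the constant comments below are the values that +// the reference LAPACK accepts for the same norm.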
+type MatrixNorm byte + +const ( + MaxAbs MatrixNorm = 'M' // max(abs(A(i,j))) ('M') + MaxColumnSum MatrixNorm = 'O' // Maximum column sum (one norm) ('1', 'O') + MaxRowSum MatrixNorm = 'I' // Maximum row sum (infinity norm) ('I', 'i') + NormFrob MatrixNorm = 'F' // Frobenius norm (sqrt of sum of squares) ('F', 'f', 'E', 'e') +) + +// MatrixType represents the kind of matrix represented in the data. +type MatrixType byte + +const ( + General MatrixType = 'G' // A dense matrix (like blas64.General). + UpperTri MatrixType = 'U' // An upper triangular matrix. + LowerTri MatrixType = 'L' // A lower triangular matrix. +) + +// Pivot specifies the pivot type for plane rotations. +type Pivot byte + +const ( + Variable Pivot = 'V' + Top Pivot = 'T' + Bottom Pivot = 'B' +) + +type DecompUpdate byte + +const ( + ApplyP DecompUpdate = 'P' + ApplyQ DecompUpdate = 'Q' +) + +// SVDJob specifies the singular vector computation type for SVD. +type SVDJob byte + +const ( + SVDAll SVDJob = 'A' // Compute all singular vectors + SVDInPlace SVDJob = 'S' // Compute the first min(m,n) singular vectors and store them in provided storage. + SVDOverwrite SVDJob = 'O' // Compute the singular vectors and store them in input matrix + SVDNone SVDJob = 'N' // Do not compute singular vectors +) + +// GSVDJob specifies the singular vector computation type for Generalized SVD. +type GSVDJob byte + +const ( + GSVDU GSVDJob = 'U' // Compute orthogonal matrix U + GSVDV GSVDJob = 'V' // Compute orthogonal matrix V + GSVDQ GSVDJob = 'Q' // Compute orthogonal matrix Q + GSVDUnit GSVDJob = 'I' // Use unit-initialized matrix + GSVDNone GSVDJob = 'N' // Do not compute orthogonal matrix +) + +// EVComp specifies how eigenvectors are computed. +type EVComp byte + +const ( + // OriginalEV specifies to compute the eigenvectors of the original + // matrix. + OriginalEV EVComp = 'V' + // TridiagEV specifies to compute the eigenvectors of the input + // tridiagonal matrix. + TridiagEV EVComp = 'I' + // HessEV specifies to compute the eigenvectors of the input upper + // Hessenberg matrix. + HessEV EVComp = 'I' + + // UpdateSchur specifies that the matrix of Schur vectors will be + // updated by Dtrexc. + UpdateSchur EVComp = 'V' +) + +// Job types for computation of eigenvectors. +type ( + EVJob byte + LeftEVJob byte + RightEVJob byte +) + +// Job constants for computation of eigenvectors. +const ( + ComputeEV EVJob = 'V' // Compute eigenvectors in Dsyev. + ComputeLeftEV LeftEVJob = 'V' // Compute left eigenvectors. + ComputeRightEV RightEVJob = 'V' // Compute right eigenvectors. +) + +// Jobs for Dgebal. +const ( + Permute Job = 'P' + Scale Job = 'S' + PermuteScale Job = 'B' +) + +// Job constants for Dhseqr. +const ( + EigenvaluesOnly EVJob = 'E' + EigenvaluesAndSchur EVJob = 'S' +) + +// EVSide specifies what eigenvectors will be computed. +type EVSide byte + +// EVSide constants for Dtrevc3. +const ( + RightEV EVSide = 'R' // Compute right eigenvectors only. + LeftEV EVSide = 'L' // Compute left eigenvectors only. + RightLeftEV EVSide = 'B' // Compute both right and left eigenvectors. +) + +// HowMany specifies which eigenvectors will be computed. +type HowMany byte + +// HowMany constants for Dtrevc3. +const ( + AllEV HowMany = 'A' // Compute all right and/or left eigenvectors. + AllEVMulQ HowMany = 'B' // Compute all right and/or left eigenvectors multiplied by an input matrix. + SelectedEV HowMany = 'S' // Compute selected right and/or left eigenvectors.
+) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go new file mode 100644 index 00000000..865fb0d5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go @@ -0,0 +1,20 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lapack64 provides a set of convenient wrapper functions for LAPACK +// calls, as specified in the netlib standard (www.netlib.org). +// +// The native Go routines are used by default, and the Use function can be used +// to set an alternative implementation. +// +// If the type of matrix (General, Symmetric, etc.) is known and fixed, it is +// used in the wrapper signature. In many cases, however, the type of the matrix +// changes during the call to the routine, for example the matrix is symmetric on +// entry and is triangular on exit. In these cases the correct types should be checked +// in the documentation. +// +// The full set of LAPACK functions is very large, and it is not clear that a +// full implementation is desirable, let alone feasible. Please open up an issue +// if there is a specific function you need and/or are willing to implement. +package lapack64 diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go new file mode 100644 index 00000000..259f8cc6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go @@ -0,0 +1,545 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lapack64 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/gonum" +) + +var lapack64 lapack.Float64 = gonum.Implementation{} + +// Use sets the LAPACK float64 implementation to be used by subsequent LAPACK calls. +// The default implementation is gonum.Implementation. +func Use(l lapack.Float64) { + lapack64 = l +} + +// Potrf computes the Cholesky factorization of a. +// The factorization has the form +// A = U^T * U if a.Uplo == blas.Upper, or +// A = L * L^T if a.Uplo == blas.Lower, +// where U is an upper triangular matrix and L is lower triangular. +// The triangular matrix is returned in t, and the underlying data between +// a and t is shared. The returned bool indicates whether a is positive +// definite and the factorization could be finished. +func Potrf(a blas64.Symmetric) (t blas64.Triangular, ok bool) { + ok = lapack64.Dpotrf(a.Uplo, a.N, a.Data, a.Stride) + t.Uplo = a.Uplo + t.N = a.N + t.Data = a.Data + t.Stride = a.Stride + t.Diag = blas.NonUnit + return +} + +// Gecon estimates the reciprocal of the condition number of the n×n matrix A +// given the LU decomposition of the matrix. The condition number computed may +// be based on the 1-norm or the ∞-norm. +// +// a contains the result of the LU decomposition of A as computed by Getrf. +// +// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 4*n and Gecon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Gecon will panic otherwise.
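+// +// A typical sequence (an illustrative sketch, not part of the original +// source) for an n×n matrix computes anorm before the factorization, since +// Getrf overwrites a; work of length 4*n and iwork of length n satisfy both +// calls: +// anorm := Lange(lapack.MaxColumnSum, a, work) +// Getrf(a, ipiv) +// rcond := Gecon(lapack.MaxColumnSum, a, anorm, work, iwork)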
+func Gecon(norm lapack.MatrixNorm, a blas64.General, anorm float64, work []float64, iwork []int) float64 { + return lapack64.Dgecon(norm, a.Cols, a.Data, a.Stride, anorm, work, iwork) +} + +// Gels finds a minimum-norm solution based on the matrices A and B using the +// QR or LQ factorization. Gels returns false if the matrix +// A is singular, and true if this solution was successfully found. +// +// The minimization problem solved depends on the input parameters. +// +// 1. If m >= n and trans == blas.NoTrans, Gels finds X such that || A*X - B||_2 +// is minimized. +// 2. If m < n and trans == blas.NoTrans, Gels finds the minimum norm solution of +// A * X = B. +// 3. If m >= n and trans == blas.Trans, Gels finds the minimum norm solution of +// A^T * X = B. +// 4. If m < n and trans == blas.Trans, Gels finds X such that || A^T*X - B||_2 +// is minimized. +// Note that the least-squares solutions (cases 1 and 4) perform the minimization +// per column of B. This is not the same as finding the minimum-norm matrix. +// +// The matrix A is a general matrix of size m×n and is modified during this call. +// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry, +// the elements of b specify the input matrix B. B has size m×nrhs if +// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the +// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, +// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic +// otherwise. A longer work will enable blocked algorithms to be called. +// In the special case that lwork == -1, work[0] will be set to the optimal working +// length. +func Gels(trans blas.Transpose, a blas64.General, b blas64.General, work []float64, lwork int) bool { + return lapack64.Dgels(trans, a.Rows, a.Cols, b.Cols, a.Data, a.Stride, b.Data, b.Stride, work, lwork) +} + +// Geqrf computes the QR factorization of the m×n matrix A using a blocked +// algorithm. A is modified to contain the information to construct Q and R. +// The upper triangle of a contains the matrix R. The lower triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length at least min(m,n), and +// this function will panic otherwise. +// +// The ith elementary reflector can be explicitly constructed by first extracting +// the vector v, +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// and computing H_i = I - tau[i] * v * v^T. +// +// The orthonormal matrix Q can be constructed from a product of these elementary +// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= n and this function will panic otherwise. +// Geqrf is a blocked QR factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Geqrf, +// the optimal work length will be stored into work[0]. +func Geqrf(a blas64.General, tau, work []float64, lwork int) { + lapack64.Dgeqrf(a.Rows, a.Cols, a.Data, a.Stride, tau, work, lwork) +} + +// Gelqf computes the LQ factorization of the m×n matrix A using a blocked +// algorithm. A is modified to contain the information to construct L and Q. The +// lower triangle of a contains the matrix L.
The elements above the diagonal +// and the slice tau represent the matrix Q. tau is modified to contain the +// reflector scales. tau must have length at least min(m,n), and this function +// will panic otherwise. +// +// See Geqrf for a description of the elementary reflectors and orthonormal +// matrix Q. Q is constructed as a product of these elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m and this function will panic otherwise. +// Gelqf is a blocked LQ factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Gelqf, +// the optimal work length will be stored into work[0]. +func Gelqf(a blas64.General, tau, work []float64, lwork int) { + lapack64.Dgelqf(a.Rows, a.Cols, a.Data, a.Stride, tau, work, lwork) +} + +// Gesvd computes the singular value decomposition of the input matrix A. +// +// The singular value decomposition is +// A = U * Sigma * V^T +// where Sigma is an m×n diagonal matrix containing the singular values of A, +// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first +// min(m,n) columns of U and V are the left and right singular vectors of A +// respectively. +// +// jobU and jobVT are options for computing the singular vectors. The behavior +// is as follows +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDInPlace The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// The behavior is the same for jobVT and the rows of V^T. At most one of jobU +// and jobVT can equal lapack.SVDOverwrite, and Gesvd will panic otherwise. +// +// On entry, a contains the data for the m×n matrix A. During the call to Gesvd +// the data is overwritten. On exit, A contains the appropriate singular vectors +// if either job is lapack.SVDOverwrite. +// +// s is a slice of length at least min(m,n) and on exit contains the singular +// values in decreasing order. +// +// u contains the left singular vectors on exit, stored columnwise. If +// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDInPlace u is +// of size m×min(m,n). If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is +// not used. +// +// vt contains the right singular vectors on exit, stored rowwise. If +// jobVT == lapack.SVDAll, vt is of size n×n. If jobVT == lapack.SVDInPlace vt is +// of size min(m,n)×n. If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is +// not used. +// +// work is a slice for storing temporary memory, and lwork is the usable size of +// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)). +// If lwork == -1, instead of performing Gesvd, the optimal work length will be +// stored into work[0]. Gesvd will panic if the working memory has insufficient +// storage. +// +// Gesvd returns whether the decomposition successfully completed. +func Gesvd(jobU, jobVT lapack.SVDJob, a, u, vt blas64.General, s, work []float64, lwork int) (ok bool) { + return lapack64.Dgesvd(jobU, jobVT, a.Rows, a.Cols, a.Data, a.Stride, s, u.Data, u.Stride, vt.Data, vt.Stride, work, lwork) +} + +// Getrf computes the LU decomposition of the m×n matrix A.
+// The LU decomposition is a factorization of A into
+//	A = P * L * U
+// where P is a permutation matrix, L is a unit lower triangular matrix, and
+// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored
+// in place into a.
+//
+// ipiv is a permutation vector. It indicates that row i of the matrix was
+// interchanged with row ipiv[i]. ipiv must have length at least min(m,n), and
+// Getrf will panic otherwise. ipiv is zero-indexed.
+//
+// Getrf is the blocked version of the algorithm.
+//
+// Getrf returns false if the matrix A is singular. The LU decomposition will
+// be computed regardless of the singularity of A, but division by zero
+// will occur if false is returned and the result is used to solve a
+// system of equations.
+func Getrf(a blas64.General, ipiv []int) bool {
+	return lapack64.Dgetrf(a.Rows, a.Cols, a.Data, a.Stride, ipiv)
+}
+
+// Getri computes the inverse of the matrix A using the LU factorization computed
+// by Getrf. On entry, a contains the PLU decomposition of A as computed by
+// Getrf and on exit contains the inverse of the original matrix.
+//
+// Getri will not perform the inversion if the matrix is singular, and returns
+// a boolean indicating whether the inversion was successful.
+//
+// Work is temporary storage, and lwork specifies the usable memory length.
+// At minimum, lwork >= n and this function will panic otherwise.
+// Getri is a blocked inversion, but the block size is limited
+// by the temporary space available. If lwork == -1, instead of performing Getri,
+// the optimal work length will be stored into work[0].
+func Getri(a blas64.General, ipiv []int, work []float64, lwork int) (ok bool) {
+	return lapack64.Dgetri(a.Cols, a.Data, a.Stride, ipiv, work, lwork)
+}
+
+// Getrs solves a system of equations using an LU factorization.
+// The system of equations solved is
+//	A * X = B    if trans == blas.NoTrans
+//	A^T * X = B  if trans == blas.Trans
+// A is a general n×n matrix. B is a general matrix of size n×nrhs.
+//
+// On entry b contains the elements of the matrix B. On exit, b contains the
+// elements of X, the solution to the system of equations.
+//
+// a and ipiv contain the LU factorization of A and the permutation indices as
+// computed by Getrf. ipiv is zero-indexed.
+func Getrs(trans blas.Transpose, a blas64.General, b blas64.General, ipiv []int) {
+	lapack64.Dgetrs(trans, a.Cols, b.Cols, a.Data, a.Stride, ipiv, b.Data, b.Stride)
+}
+
+// Ggsvd3 computes the generalized singular value decomposition (GSVD)
+// of an m×n matrix A and p×n matrix B:
+//	U^T*A*Q = D1*[ 0 R ]
+//
+//	V^T*B*Q = D2*[ 0 R ]
+// where U, V and Q are orthogonal matrices.
+//
+// Ggsvd3 returns k and l, the dimensions of the sub-blocks. k+l
+// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T.
+// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and
+// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following
+// structures, respectively:
+//
+// If m-k-l >= 0,
+//
+//	                  k  l
+//	   D1 =     k  [ I  0 ]
+//	            l  [ 0  C ]
+//	        m-k-l  [ 0  0 ]
+//
+//	                k  l
+//	   D2 =   l  [ 0  S ]
+//	        p-l  [ 0  0 ]
+//
+//	           n-k-l  k    l
+//	[ 0 R ] = k [ 0  R11  R12 ] k
+//	          l [ 0   0   R22 ] l
+//
+// where
+//
+//	C = diag( alpha_k, ... , alpha_{k+l} ),
+//	S = diag( beta_k, ... , beta_{k+l} ),
+//	C^2 + S^2 = I.
+//
+// R is stored in
+//	A[0:k+l, n-k-l:n]
+// on exit.
+//
+// If m-k-l < 0,
+//
+//	                k  m-k  k+l-m
+//	  D1 =    k [ I   0    0  ]
+//	        m-k [ 0   C    0  ]
+//
+//	                k  m-k  k+l-m
+//	  D2 =  m-k   [ 0  S    0 ]
+//	        k+l-m [ 0  0    I ]
+//	        p-l   [ 0  0    0 ]
+//
+//	              n-k-l  k   m-k  k+l-m
+//	[ 0 R ] = k     [ 0  R11  R12  R13 ]
+//	          m-k   [ 0   0   R22  R23 ]
+//	          k+l-m [ 0   0    0   R33 ]
+//
+// where
+//	C = diag( alpha_k, ... , alpha_m ),
+//	S = diag( beta_k, ... , beta_m ),
+//	C^2 + S^2 = I.
+//
+//	R = [ R11 R12 R13 ] is stored in A[0:m, n-k-l:n]
+//	    [  0  R22 R23 ]
+// and R33 is stored in
+//	B[m-k:l, n+m-k-l:n]
+// on exit.
+//
+// Ggsvd3 computes C, S, R, and optionally the orthogonal transformation
+// matrices U, V and Q.
+//
+// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior
+// is as follows
+//	jobU == lapack.GSVDU     Compute orthogonal matrix U
+//	jobU == lapack.GSVDNone  Do not compute orthogonal matrix.
+// The behavior is the same for jobV and jobQ with the exception that instead of
+// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively.
+// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the
+// relevant job parameter is lapack.GSVDNone.
+//
+// alpha and beta must have length n or Ggsvd3 will panic. On exit, alpha and
+// beta contain the generalized singular value pairs of A and B
+//	alpha[0:k] = 1,
+//	beta[0:k] = 0,
+// if m-k-l >= 0,
+//	alpha[k:k+l] = diag(C),
+//	beta[k:k+l] = diag(S),
+// if m-k-l < 0,
+//	alpha[k:m] = C, alpha[m:k+l] = 0
+//	beta[k:m] = S, beta[m:k+l] = 1.
+// if k+l < n,
+//	alpha[k+l:n] = 0 and
+//	beta[k+l:n] = 0.
+//
+// On exit, iwork contains the permutation required to sort alpha descending.
+//
+// iwork must have length n, work must have length at least max(1, lwork), and
+// lwork must be -1 or greater than n, otherwise Ggsvd3 will panic. If
+// lwork is -1, work[0] holds the optimal lwork on return, but Ggsvd3 does
+// not perform the GSVD.
+func Ggsvd3(jobU, jobV, jobQ lapack.GSVDJob, a, b blas64.General, alpha, beta []float64, u, v, q blas64.General, work []float64, lwork int, iwork []int) (k, l int, ok bool) {
+	return lapack64.Dggsvd3(jobU, jobV, jobQ, a.Rows, a.Cols, b.Rows, a.Data, a.Stride, b.Data, b.Stride, alpha, beta, u.Data, u.Stride, v.Data, v.Stride, q.Data, q.Stride, work, lwork, iwork)
+}
+
+// Lange computes the matrix norm of the general m×n matrix A. The input norm
+// specifies the norm computed.
+//	lapack.MaxAbs: the maximum absolute value of an element.
+//	lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries.
+//	lapack.MaxRowSum: the maximum row sum of the absolute values of the entries.
+//	lapack.Frobenius: the square root of the sum of the squares of the entries.
+// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise.
+// There are no restrictions on work for the other matrix norms.
+func Lange(norm lapack.MatrixNorm, a blas64.General, work []float64) float64 {
+	return lapack64.Dlange(norm, a.Rows, a.Cols, a.Data, a.Stride, work)
+}
+
+// Lansy computes the specified norm of an n×n symmetric matrix. If
+// norm == lapack.MaxColumnSum or norm == lapack.MaxRowSum, work must have length
+// at least n and this function will panic otherwise.
+// There are no restrictions on work for the other matrix norms.
+func Lansy(norm lapack.MatrixNorm, a blas64.Symmetric, work []float64) float64 {
+	return lapack64.Dlansy(norm, a.Uplo, a.N, a.Data, a.Stride, work)
+}
+
+// Lantr computes the specified norm of an m×n trapezoidal matrix A. If
+// norm == lapack.MaxColumnSum, work must have length at least n and this function
+// will panic otherwise. There are no restrictions on work for the other matrix norms.
+func Lantr(norm lapack.MatrixNorm, a blas64.Triangular, work []float64) float64 {
+	return lapack64.Dlantr(norm, a.Uplo, a.Diag, a.N, a.N, a.Data, a.Stride, work)
+}
+
+// Lapmt rearranges the columns of the m×n matrix X as specified by the
+// permutation k_0, k_1, ..., k_{n-1} of the integers 0, ..., n-1.
+//
+// If forward is true a forward permutation is performed:
+//
+//	X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1.
+//
+// otherwise a backward permutation is performed:
+//
+//	X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1.
+//
+// k must have length n, otherwise Lapmt will panic. k is zero-indexed.
+func Lapmt(forward bool, x blas64.General, k []int) {
+	lapack64.Dlapmt(forward, x.Rows, x.Cols, x.Data, x.Stride, k)
+}
+
+// Ormlq multiplies the matrix C by the orthogonal matrix Q defined by
+// A and tau. A and tau are as returned from Gelqf.
+//	C = Q * C    if side == blas.Left and trans == blas.NoTrans
+//	C = Q^T * C  if side == blas.Left and trans == blas.Trans
+//	C = C * Q    if side == blas.Right and trans == blas.NoTrans
+//	C = C * Q^T  if side == blas.Right and trans == blas.Trans
+// If side == blas.Left, A is a matrix of size k×m, and if side == blas.Right
+// A is of size k×n. This uses a blocked algorithm.
+//
+// Work is temporary storage, and lwork specifies the usable memory length.
+// At minimum, lwork >= n if side == blas.Left and lwork >= m if side == blas.Right,
+// and this function will panic otherwise.
+// Ormlq uses a block algorithm, but the block size is limited
+// by the temporary space available. If lwork == -1, instead of performing Ormlq,
+// the optimal work length will be stored into work[0].
+//
+// Tau contains the Householder scales and must have length at least k, and
+// this function will panic otherwise.
+func Ormlq(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) {
+	lapack64.Dormlq(side, trans, c.Rows, c.Cols, a.Rows, a.Data, a.Stride, tau, c.Data, c.Stride, work, lwork)
+}
+
+// Ormqr multiplies an m×n matrix C by an orthogonal matrix Q as
+//	C = Q * C,    if side == blas.Left and trans == blas.NoTrans,
+//	C = Q^T * C,  if side == blas.Left and trans == blas.Trans,
+//	C = C * Q,    if side == blas.Right and trans == blas.NoTrans,
+//	C = C * Q^T,  if side == blas.Right and trans == blas.Trans,
+// where Q is defined as the product of k elementary reflectors
+//	Q = H_0 * H_1 * ... * H_{k-1}.
+//
+// If side == blas.Left, A is an m×k matrix and 0 <= k <= m.
+// If side == blas.Right, A is an n×k matrix and 0 <= k <= n.
+// The ith column of A contains the vector which defines the elementary
+// reflector H_i and tau[i] contains its scalar factor. tau must have length k
+// and Ormqr will panic otherwise. Geqrf returns A and tau in the required
+// form.
+//
+// work must have length at least max(1,lwork), and lwork must be at least n if
+// side == blas.Left and at least m if side == blas.Right, otherwise Ormqr will
+// panic.
+//
+// work is temporary storage, and lwork specifies the usable memory length.
+// Larger values of lwork will generally give better performance. On return,
+// work[0] will contain the optimal value of lwork.
+//
+// If lwork is -1, instead of performing Ormqr, the optimal workspace size will
+// be stored into work[0].
+func Ormqr(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) {
+	lapack64.Dormqr(side, trans, c.Rows, c.Cols, a.Cols, a.Data, a.Stride, tau, c.Data, c.Stride, work, lwork)
+}
+
+// Pocon estimates the reciprocal of the condition number of a positive-definite
+// matrix A given the Cholesky decomposition of A. The condition number computed
+// is based on the 1-norm and the ∞-norm.
+//
+// anorm is the 1-norm and the ∞-norm of the original matrix A.
+//
+// work is a temporary data slice of length at least 3*n and Pocon will panic otherwise.
+//
+// iwork is a temporary data slice of length at least n and Pocon will panic otherwise.
+func Pocon(a blas64.Symmetric, anorm float64, work []float64, iwork []int) float64 {
+	return lapack64.Dpocon(a.Uplo, a.N, a.Data, a.Stride, anorm, work, iwork)
+}
+
+// Syev computes all eigenvalues and, optionally, the eigenvectors of a real
+// symmetric matrix A.
+//
+// w contains the eigenvalues in ascending order upon return. w must have length
+// at least n, and Syev will panic otherwise.
+//
+// On entry, a contains the elements of the symmetric matrix A in the triangular
+// portion specified by uplo. If jobz == lapack.ComputeEV a contains the
+// orthonormal eigenvectors of A on exit, otherwise on exit the specified
+// triangular region is overwritten.
+//
+// Work is temporary storage, and lwork specifies the usable memory length. At minimum,
+// lwork >= 3*n-1, and Syev will panic otherwise. The amount of blocking is
+// limited by the usable length. If lwork == -1, instead of computing Syev the
+// optimal work length is stored into work[0].
+func Syev(jobz lapack.EVJob, a blas64.Symmetric, w, work []float64, lwork int) (ok bool) {
+	return lapack64.Dsyev(jobz, a.Uplo, a.N, a.Data, a.Stride, w, work, lwork)
+}
+
+// Trcon estimates the reciprocal of the condition number of a triangular matrix A.
+// The condition number computed may be based on the 1-norm or the ∞-norm.
+//
+// work is a temporary data slice of length at least 3*n and Trcon will panic otherwise.
+//
+// iwork is a temporary data slice of length at least n and Trcon will panic otherwise.
+func Trcon(norm lapack.MatrixNorm, a blas64.Triangular, work []float64, iwork []int) float64 {
+	return lapack64.Dtrcon(norm, a.Uplo, a.Diag, a.N, a.Data, a.Stride, work, iwork)
+}
+
+// Trtri computes the inverse of a triangular matrix, storing the result in place
+// into a.
+//
+// Trtri will not perform the inversion if the matrix is singular, and returns
+// a boolean indicating whether the inversion was successful.
+func Trtri(a blas64.Triangular) (ok bool) {
+	return lapack64.Dtrtri(a.Uplo, a.Diag, a.N, a.Data, a.Stride)
+}
+
+// Trtrs solves a triangular system of the form A * X = B or A^T * X = B. Trtrs
+// returns whether the solve completed successfully. If A is singular, no solve is performed.
+func Trtrs(trans blas.Transpose, a blas64.Triangular, b blas64.General) (ok bool) {
+	return lapack64.Dtrtrs(a.Uplo, trans, a.Diag, a.N, b.Cols, a.Data, a.Stride, b.Data, b.Stride)
+}
+
+// Geev computes the eigenvalues and, optionally, the left and/or right
+// eigenvectors for an n×n real nonsymmetric matrix A.
+//
+// The right eigenvector v_j of A corresponding to an eigenvalue λ_j
+// is defined by
+//	A v_j = λ_j v_j,
+// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by
+//	u_j^H A = λ_j u_j^H,
+// where u_j^H is the conjugate transpose of u_j.
+//
+// On return, A will be overwritten and the left and right eigenvectors will be
+// stored, respectively, in the columns of the n×n matrices VL and VR in the
+// same order as their eigenvalues. If the j-th eigenvalue is real, then
+//	u_j = VL[:,j],
+//	v_j = VR[:,j],
+// and if it is not real, then j and j+1 form a complex conjugate pair and the
+// eigenvectors can be recovered as
+//	u_j     = VL[:,j] + i*VL[:,j+1],
+//	u_{j+1} = VL[:,j] - i*VL[:,j+1],
+//	v_j     = VR[:,j] + i*VR[:,j+1],
+//	v_{j+1} = VR[:,j] - i*VR[:,j+1],
+// where i is the imaginary unit. The computed eigenvectors are normalized to
+// have Euclidean norm equal to 1 and largest component real.
+//
+// Left eigenvectors will be computed only if jobvl == lapack.ComputeLeftEV,
+// otherwise jobvl must be lapack.None.
+// Right eigenvectors will be computed only if jobvr == lapack.ComputeRightEV,
+// otherwise jobvr must be lapack.None.
+// For other values of jobvl and jobvr Geev will panic.
+//
+// On return, wr and wi will contain the real and imaginary parts, respectively,
+// of the computed eigenvalues. Complex conjugate pairs of eigenvalues appear
+// consecutively with the eigenvalue having the positive imaginary part first.
+// wr and wi must have length n, and Geev will panic otherwise.
+//
+// work must have length at least lwork and lwork must be at least max(1,4*n) if
+// the left or right eigenvectors are computed, and at least max(1,3*n) if no
+// eigenvectors are computed. For good performance, lwork must generally be
+// larger. On return, the optimal value of lwork will be stored in work[0].
+//
+// If lwork == -1, instead of performing Geev, the function only calculates the
+// optimal value of lwork and stores it into work[0].
+//
+// On return, first will be the index of the first valid eigenvalue.
+// If first == 0, all eigenvalues and eigenvectors have been computed.
+// If first is positive, Geev failed to compute all the eigenvalues, no
+// eigenvectors have been computed and wr[first:] and wi[first:] contain those
+// eigenvalues which have converged.
+func Geev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, a blas64.General, wr, wi []float64, vl, vr blas64.General, work []float64, lwork int) (first int) {
+	n := a.Rows
+	if a.Cols != n {
+		panic("lapack64: matrix not square")
+	}
+	if jobvl == lapack.ComputeLeftEV && (vl.Rows != n || vl.Cols != n) {
+		panic("lapack64: bad size of VL")
+	}
+	if jobvr == lapack.ComputeRightEV && (vr.Rows != n || vr.Cols != n) {
+		panic("lapack64: bad size of VR")
+	}
+	return lapack64.Dgeev(jobvl, jobvr, n, a.Data, a.Stride, wr, wi, vl.Data, vl.Stride, vr.Data, vr.Stride, work, lwork)
+}
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/README.md b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/README.md
new file mode 100644
index 00000000..0f77e470
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/README.md
@@ -0,0 +1,3 @@
+# Gonum matrix [![GoDoc](https://godoc.org/gonum.org/v1/gonum/mat?status.svg)](https://godoc.org/gonum.org/v1/gonum/mat)
+
+Package mat is a matrix package for the Go language.
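Editorial note on the lapack64 wrappers above: most of them share LAPACK's workspace-query convention, in which a call with lwork == -1 performs no computation and instead writes the optimal workspace length into work[0]. A minimal sketch of the resulting two-call idiom, assuming only the vendored gonum packages added in this change (illustrative, not part of the vendored files):

	package example

	import (
		"gonum.org/v1/gonum/blas/blas64"
		"gonum.org/v1/gonum/lapack/lapack64"
	)

	// qrFactor factorizes a in place with Geqrf, first querying the
	// optimal workspace length and then performing the factorization.
	func qrFactor(a blas64.General) (tau []float64) {
		k := a.Rows
		if a.Cols < k {
			k = a.Cols
		}
		tau = make([]float64, k)
		work := make([]float64, 1)
		lapack64.Geqrf(a, tau, work, -1) // query only: work[0] receives the optimal lwork
		work = make([]float64, int(work[0]))
		lapack64.Geqrf(a, tau, work, len(work)) // the actual blocked factorization
		return tau
	}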
diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/band.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/band.go
new file mode 100644
index 00000000..d196a5ae
--- /dev/null
+++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/band.go
@@ -0,0 +1,228 @@
+// Copyright ©2017 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mat
+
+import (
+	"gonum.org/v1/gonum/blas/blas64"
+)
+
+var (
+	bandDense *BandDense
+	_         Matrix    = bandDense
+	_         Banded    = bandDense
+	_         RawBander = bandDense
+
+	_ NonZeroDoer    = bandDense
+	_ RowNonZeroDoer = bandDense
+	_ ColNonZeroDoer = bandDense
+)
+
+// BandDense represents a band matrix in dense storage format.
+type BandDense struct {
+	mat blas64.Band
+}
+
+// Banded is a band matrix representation.
+type Banded interface {
+	Matrix
+	// Bandwidth returns the lower and upper bandwidth values for
+	// the matrix. The total bandwidth of the matrix is kl+ku+1.
+	Bandwidth() (kl, ku int)
+
+	// TBand is the equivalent of the T() method in the Matrix
+	// interface but guarantees the transpose is of banded type.
+	TBand() Banded
+}
+
+// A RawBander can return a blas64.Band representation of the receiver.
+// Changes to the blas64.Band.Data slice will be reflected in the original
+// matrix, changes to the Rows, Cols, KL, KU and Stride fields will not.
+type RawBander interface {
+	RawBand() blas64.Band
+}
+
+// A MutableBanded can set elements of a band matrix.
+type MutableBanded interface {
+	Banded
+	SetBand(i, j int, v float64)
+}
+
+var (
+	_ Matrix            = TransposeBand{}
+	_ Banded            = TransposeBand{}
+	_ UntransposeBander = TransposeBand{}
+)
+
+// TransposeBand is a type for performing an implicit transpose of a band
+// matrix. It implements the Banded interface, returning values from the
+// transpose of the matrix within.
+type TransposeBand struct {
+	Banded Banded
+}
+
+// At returns the value of the element at row i and column j of the transposed
+// matrix, that is, row j and column i of the Banded field.
+func (t TransposeBand) At(i, j int) float64 {
+	return t.Banded.At(j, i)
+}
+
+// Dims returns the dimensions of the transposed matrix.
+func (t TransposeBand) Dims() (r, c int) {
+	c, r = t.Banded.Dims()
+	return r, c
+}
+
+// T performs an implicit transpose by returning the Banded field.
+func (t TransposeBand) T() Matrix {
+	return t.Banded
+}
+
+// Bandwidth returns the lower and upper bandwidth values for
+// the transposed matrix.
+func (t TransposeBand) Bandwidth() (kl, ku int) {
+	kl, ku = t.Banded.Bandwidth()
+	return ku, kl
+}
+
+// TBand performs an implicit transpose by returning the Banded field.
+func (t TransposeBand) TBand() Banded {
+	return t.Banded
+}
+
+// Untranspose returns the Banded field.
+func (t TransposeBand) Untranspose() Matrix {
+	return t.Banded
+}
+
+// UntransposeBand returns the Banded field.
+func (t TransposeBand) UntransposeBand() Banded {
+	return t.Banded
+}
+
+// NewBandDense creates a new Band matrix with r rows and c columns. If data == nil,
+// a new slice is allocated for the backing slice. If len(data) == min(r, c+kl)*(kl+ku+1),
+// data is used as the backing slice, and changes to the elements of the returned
+// BandDense will be reflected in data. If neither of these is true, NewBandDense
+// will panic. kl must be at least zero and less than r, and ku must be at least zero and
+// less than c, otherwise NewBandDense will panic.
+//
+// The data must be arranged in row-major order constructed by removing the zeros
+// from the rows outside the band and aligning the diagonals. For example, the matrix
+//	 1  2  3  0  0  0
+//	 4  5  6  7  0  0
+//	 0  8  9 10 11  0
+//	 0  0 12 13 14 15
+//	 0  0  0 16 17 18
+//	 0  0  0  0 19 20
+// becomes (* entries are never accessed)
+//	 *  1  2  3
+//	 4  5  6  7
+//	 8  9 10 11
+//	12 13 14 15
+//	16 17 18  *
+//	19 20  *  *
+// which is passed to NewBandDense as []float64{*, 1, 2, 3, 4, ...} with kl=1 and ku=2.
+// Only the values in the band portion of the matrix are used.
+func NewBandDense(r, c, kl, ku int, data []float64) *BandDense {
+	if r < 0 || c < 0 || kl < 0 || ku < 0 {
+		panic("mat: negative dimension")
+	}
+	if kl+1 > r || ku+1 > c {
+		panic("mat: band out of range")
+	}
+	bc := kl + ku + 1
+	if data != nil && len(data) != min(r, c+kl)*bc {
+		panic(ErrShape)
+	}
+	if data == nil {
+		data = make([]float64, min(r, c+kl)*bc)
+	}
+	return &BandDense{
+		mat: blas64.Band{
+			Rows:   r,
+			Cols:   c,
+			KL:     kl,
+			KU:     ku,
+			Stride: bc,
+			Data:   data,
+		},
+	}
+}
+
+// NewDiagonalRect is a convenience function that returns a diagonal matrix represented by a
+// BandDense. The length of data must be min(r, c) otherwise NewDiagonalRect will panic.
+func NewDiagonalRect(r, c int, data []float64) *BandDense {
+	return NewBandDense(r, c, 0, 0, data)
+}
+
+// Dims returns the number of rows and columns in the matrix.
+func (b *BandDense) Dims() (r, c int) {
+	return b.mat.Rows, b.mat.Cols
+}
+
+// Bandwidth returns the lower and upper bandwidths of the matrix.
+func (b *BandDense) Bandwidth() (kl, ku int) {
+	return b.mat.KL, b.mat.KU
+}
+
+// T performs an implicit transpose by returning the receiver inside a Transpose.
+func (b *BandDense) T() Matrix {
+	return Transpose{b}
+}
+
+// TBand performs an implicit transpose by returning the receiver inside a TransposeBand.
+func (b *BandDense) TBand() Banded {
+	return TransposeBand{b}
+}
+
+// RawBand returns the underlying blas64.Band used by the receiver.
+// Changes to elements in the receiver following the call will be reflected
+// in returned blas64.Band.
+func (b *BandDense) RawBand() blas64.Band {
+	return b.mat
+}
+
+// DoNonZero calls the function fn for each of the non-zero elements of b. The function fn
+// takes a row/column index and the element value of b at (i, j).
+func (b *BandDense) DoNonZero(fn func(i, j int, v float64)) {
+	for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ {
+		for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ {
+			v := b.at(i, j)
+			if v != 0 {
+				fn(i, j, v)
+			}
+		}
+	}
+}
+
+// DoRowNonZero calls the function fn for each of the non-zero elements of row i of b. The function fn
+// takes a row/column index and the element value of b at (i, j).
+func (b *BandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) {
+	if i < 0 || b.mat.Rows <= i {
+		panic(ErrRowAccess)
+	}
+	for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ {
+		v := b.at(i, j)
+		if v != 0 {
+			fn(i, j, v)
+		}
+	}
+}
+
+// DoColNonZero calls the function fn for each of the non-zero elements of column j of b. The function fn
+// takes a row/column index and the element value of b at (i, j).
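+//
+// For illustration (an editorial sketch, not part of the original gonum
+// source), a 4×4 tridiagonal matrix can be built and its non-zero entries
+// visited with the DoNonZero variants as
+//	b := NewBandDense(4, 4, 1, 1, []float64{
+//		0, 1, 2, // leading * entry, never accessed
+//		3, 4, 5,
+//		6, 7, 8,
+//		9, 10, 0, // trailing * entry, never accessed
+//	})
+//	b.DoNonZero(func(i, j int, v float64) {
+//		fmt.Println(i, j, v)
+//	})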
+func (b *BandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || b.mat.Cols <= j { + panic(ErrColAccess) + } + for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ { + if i-b.mat.KL <= j && j < i+b.mat.KU+1 { + v := b.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/cholesky.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/cholesky.go new file mode 100644 index 00000000..399c8114 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/cholesky.go @@ -0,0 +1,585 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badTriangle = "mat: invalid triangle" + badCholesky = "mat: invalid Cholesky factorization" +) + +// Cholesky is a type for creating and using the Cholesky factorization of a +// symmetric positive definite matrix. +// +// Cholesky methods may only be called on a value that has been successfully +// initialized by a call to Factorize that has returned true. Calls to methods +// of an unsuccessful Cholesky factorization will panic. +type Cholesky struct { + // The chol pointer must never be retained as a pointer outside the Cholesky + // struct, either by returning chol outside the struct or by setting it to + // a pointer coming from outside. The same prohibition applies to the data + // slice within chol. + chol *TriDense + cond float64 +} + +// updateCond updates the condition number of the Cholesky decomposition. If +// norm > 0, then that norm is used as the norm of the original matrix A, otherwise +// the norm is estimated from the decomposition. +func (c *Cholesky) updateCond(norm float64) { + n := c.chol.mat.N + work := getFloats(3*n, false) + defer putFloats(work) + if norm < 0 { + // This is an approximation. By the definition of a norm, + // |AB| <= |A| |B|. + // Since A = U^T*U, we get for the condition number κ that + // κ(A) := |A| |A^-1| = |U^T*U| |A^-1| <= |U^T| |U| |A^-1|, + // so this will overestimate the condition number somewhat. + // The norm of the original factorized matrix cannot be stored + // because of update possibilities. + unorm := lapack64.Lantr(CondNorm, c.chol.mat, work) + lnorm := lapack64.Lantr(CondNormTrans, c.chol.mat, work) + norm = unorm * lnorm + } + sym := c.chol.asSymBlas() + iwork := getInts(n, false) + v := lapack64.Pocon(sym, norm, work, iwork) + putInts(iwork) + c.cond = 1 / v +} + +// Cond returns the condition number of the factorized matrix. +func (c *Cholesky) Cond() float64 { + return c.cond +} + +// Factorize calculates the Cholesky decomposition of the matrix A and returns +// whether the matrix is positive definite. If Factorize returns false, the +// factorization must not be used. 
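+//
+// A minimal usage sketch (an editorial illustration, not part of the original
+// source): factorize a 2×2 symmetric positive definite matrix and solve A*x = b.
+//	var ch Cholesky
+//	a := NewSymDense(2, []float64{4, 1, 1, 3})
+//	if !ch.Factorize(a) {
+//		// a is not positive definite; the factorization must not be used.
+//	}
+//	var x VecDense
+//	if err := ch.SolveVec(&x, NewVecDense(2, []float64{1, 2})); err != nil {
+//		// err is a Condition warning: the system is ill-conditioned.
+//	}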
+func (c *Cholesky) Factorize(a Symmetric) (ok bool) { + n := a.Symmetric() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + copySymIntoTriangle(c.chol, a) + + sym := c.chol.asSymBlas() + work := getFloats(c.chol.mat.N, false) + norm := lapack64.Lansy(CondNorm, sym, work) + putFloats(work) + _, ok = lapack64.Potrf(sym) + if ok { + c.updateCond(norm) + } else { + c.Reset() + } + return ok +} + +// Reset resets the factorization so that it can be reused as the receiver of a +// dimensionally restricted operation. +func (c *Cholesky) Reset() { + if c.chol != nil { + c.chol.Reset() + } + c.cond = math.Inf(1) +} + +// SetFromU sets the Cholesky decomposition from the given triangular matrix. +// SetFromU panics if t is not upper triangular. Note that t is copied into, +// not stored inside, the receiver. +func (c *Cholesky) SetFromU(t *TriDense) { + n, kind := t.Triangle() + if kind != Upper { + panic("cholesky: matrix must be upper triangular") + } + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + c.chol.Copy(t) + c.updateCond(-1) +} + +// Clone makes a copy of the input Cholesky into the receiver, overwriting the +// previous value of the receiver. Clone does not place any restrictions on receiver +// shape. Clone panics if the input Cholesky is not the result of a valid decomposition. +func (c *Cholesky) Clone(chol *Cholesky) { + if !chol.valid() { + panic(badCholesky) + } + n := chol.Size() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + c.chol.Copy(chol.chol) + c.cond = chol.cond +} + +// Size returns the dimension of the factorized matrix. +func (c *Cholesky) Size() int { + if !c.valid() { + panic(badCholesky) + } + return c.chol.mat.N +} + +// Det returns the determinant of the matrix that has been factorized. +func (c *Cholesky) Det() float64 { + if !c.valid() { + panic(badCholesky) + } + return math.Exp(c.LogDet()) +} + +// LogDet returns the log of the determinant of the matrix that has been factorized. +func (c *Cholesky) LogDet() float64 { + if !c.valid() { + panic(badCholesky) + } + var det float64 + for i := 0; i < c.chol.mat.N; i++ { + det += 2 * math.Log(c.chol.mat.Data[i*c.chol.mat.Stride+i]) + } + return det +} + +// Solve finds the matrix x that solves A * X = B where A is represented +// by the Cholesky decomposition, placing the result in x. +func (c *Cholesky) Solve(x *Dense, b Matrix) error { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + bm, bn := b.Dims() + if n != bm { + panic(ErrShape) + } + + x.reuseAs(bm, bn) + if b != x { + x.Copy(b) + } + blas64.Trsm(blas.Left, blas.Trans, 1, c.chol.mat, x.mat) + blas64.Trsm(blas.Left, blas.NoTrans, 1, c.chol.mat, x.mat) + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil +} + +// SolveChol finds the matrix x that solves A * X = B where A and B are represented +// by their Cholesky decompositions a and b, placing the result in x. 
+func (a *Cholesky) SolveChol(x *Dense, b *Cholesky) error { + if !a.valid() || !b.valid() { + panic(badCholesky) + } + bn := b.chol.mat.N + if a.chol.mat.N != bn { + panic(ErrShape) + } + + x.reuseAsZeroed(bn, bn) + x.Copy(b.chol.T()) + blas64.Trsm(blas.Left, blas.Trans, 1, a.chol.mat, x.mat) + blas64.Trsm(blas.Left, blas.NoTrans, 1, a.chol.mat, x.mat) + blas64.Trmm(blas.Right, blas.NoTrans, 1, b.chol.mat, x.mat) + if a.cond > ConditionTolerance { + return Condition(a.cond) + } + return nil +} + +// SolveVec finds the vector x that solves A * x = b where A is represented +// by the Cholesky decomposition, placing the result in x. +func (c *Cholesky) SolveVec(x *VecDense, b Vector) error { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if br, bc := b.Dims(); br != n || bc != 1 { + panic(ErrShape) + } + switch rv := b.(type) { + default: + x.reuseAs(n) + return c.Solve(x.asDense(), b) + case RawVectorer: + bmat := rv.RawVector() + if x != b { + x.checkOverlap(bmat) + } + x.reuseAs(n) + if x != b { + x.CopyVec(b) + } + blas64.Trsv(blas.Trans, c.chol.mat, x.mat) + blas64.Trsv(blas.NoTrans, c.chol.mat, x.mat) + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil + } +} + +// RawU returns the Triangular matrix used to store the Cholesky decomposition of +// the original matrix A. The returned matrix should not be modified. If it is +// modified, the decomposition is invalid and should not be used. +func (c *Cholesky) RawU() Triangular { + return c.chol +} + +// UTo extracts the n×n upper triangular matrix U from a Cholesky +// decomposition into dst and returns the result. If dst is nil a new +// TriDense is allocated. +// A = U^T * U. +func (c *Cholesky) UTo(dst *TriDense) *TriDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewTriDense(n, Upper, make([]float64, n*n)) + } else { + dst.reuseAs(n, Upper) + } + dst.Copy(c.chol) + return dst +} + +// LTo extracts the n×n lower triangular matrix L from a Cholesky +// decomposition into dst and returns the result. If dst is nil a new +// TriDense is allocated. +// A = L * L^T. +func (c *Cholesky) LTo(dst *TriDense) *TriDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewTriDense(n, Lower, make([]float64, n*n)) + } else { + dst.reuseAs(n, Lower) + } + dst.Copy(c.chol.TTri()) + return dst +} + +// ToSym reconstructs the original positive definite matrix given its +// Cholesky decomposition into dst and returns the result. If dst is nil +// a new SymDense is allocated. +func (c *Cholesky) ToSym(dst *SymDense) *SymDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewSymDense(n, make([]float64, n*n)) + } else { + dst.reuseAs(n) + } + dst.SymOuterK(1, c.chol.T()) + return dst +} + +// InverseTo computes the inverse of the matrix represented by its Cholesky +// factorization and stores the result into s. If the factorized +// matrix is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally be +// avoided where possible, for example by using the Solve routines. +func (c *Cholesky) InverseTo(s *SymDense) error { + if !c.valid() { + panic(badCholesky) + } + // TODO(btracey): Replace this code with a direct call to Dpotri when it + // is available. 
+	s.reuseAs(c.chol.mat.N)
+	// If:
+	//  chol(A) = U^T * U
+	// Then:
+	//  chol(A^-1) = S * S^T
+	// where S = U^-1
+	var t TriDense
+	err := t.InverseTri(c.chol)
+	s.SymOuterK(1, &t)
+	return err
+}
+
+// Scale multiplies the original matrix A by a positive constant using
+// its Cholesky decomposition, storing the result in-place into the receiver.
+// That is, if the original Cholesky factorization is
+//	U^T * U = A
+// the updated factorization is
+//	U'^T * U' = f A = A'
+// Scale panics if the constant is non-positive, or if the receiver is non-zero
+// and is of a different Size from the input.
+func (c *Cholesky) Scale(f float64, orig *Cholesky) {
+	if !orig.valid() {
+		panic(badCholesky)
+	}
+	if f <= 0 {
+		panic("cholesky: scaling by a non-positive constant")
+	}
+	n := orig.Size()
+	if c.chol == nil {
+		c.chol = NewTriDense(n, Upper, nil)
+	} else if c.chol.mat.N != n {
+		panic(ErrShape)
+	}
+	c.chol.ScaleTri(math.Sqrt(f), orig.chol)
+	c.cond = orig.cond // Scaling by a positive constant does not change the condition number.
+}
+
+// ExtendVecSym computes the Cholesky decomposition of the original matrix A,
+// whose Cholesky decomposition is in a, extended by the n×1 vector v according to
+//	[A  w]
+//	[w' k]
+// where k = v[n-1] and w = v[:n-1]. The result is stored into the receiver.
+// In order for the updated matrix to be positive definite, it must be the case
+// that k > w' A^-1 w. If this condition does not hold then ExtendVecSym will
+// return false and the receiver will not be updated.
+//
+// ExtendVecSym will panic if v.Len() != a.Size()+1 or if a does not contain
+// a valid decomposition.
+func (chol *Cholesky) ExtendVecSym(a *Cholesky, v Vector) (ok bool) {
+	n := a.Size()
+	if v.Len() != n+1 {
+		panic(badSliceLength)
+	}
+	if !a.valid() {
+		panic(badCholesky)
+	}
+
+	// The algorithm is commented here, but see also
+	//  https://math.stackexchange.com/questions/955874/cholesky-factor-when-adding-a-row-and-column-to-already-factorized-matrix
+	// We have A and want to compute the Cholesky of
+	//  [A  w]
+	//  [w' k]
+	// We want
+	//  [U  c]
+	//  [0  d]
+	// to be the updated Cholesky, and so it must be that
+	//  [A  w] = [U' 0] [U  c]
+	//  [w' k]   [c' d] [0  d]
+	// Thus, we need
+	//  1) A = U'U (true by the original decomposition being valid),
+	//  2) U' * c = w  =>  c = U'^-1 w
+	//  3) c'*c + d'*d = k  =>  d = sqrt(k-c'*c)
+
+	// First, compute c = U'^-1 w
+	// TODO(btracey): Replace this with CopyVec when issue 167 is fixed.
+	w := NewVecDense(n, nil)
+	for i := 0; i < n; i++ {
+		w.SetVec(i, v.At(i, 0))
+	}
+	k := v.At(n, 0)
+
+	c := NewVecDense(n, nil)
+	c.SolveVec(a.chol.T(), w)
+
+	dot := Dot(c, c)
+	if dot >= k {
+		return false
+	}
+	d := math.Sqrt(k - dot)
+
+	newU := NewTriDense(n+1, Upper, nil)
+	newU.Copy(a.chol)
+	for i := 0; i < n; i++ {
+		newU.SetTri(i, n, c.At(i, 0))
+	}
+	newU.SetTri(n, n, d)
+	chol.chol = newU
+	chol.updateCond(-1)
+	return true
+}
+
+// SymRankOne performs a rank-1 update of the original matrix A and refactorizes
+// its Cholesky factorization, storing the result into the receiver. That is, if
+// in the original Cholesky factorization
+//	U^T * U = A,
+// then in the updated factorization
+//	U'^T * U' = A + alpha * x * x^T = A'.
+//
+// Note that when alpha is negative, the updating problem may be ill-conditioned
+// and the results may be inaccurate, or the updated matrix A' may not be
+// positive definite and not have a Cholesky factorization. SymRankOne returns
+// whether the updated matrix A' is positive definite.
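+//
+// For example (an editorial sketch, not part of the original source), given a
+// valid factorization ch of A, the factorization of A + x*x^T is obtained
+// without refactorizing from scratch by
+//	var updated Cholesky
+//	ok := updated.SymRankOne(&ch, 1, x)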
+//
+// SymRankOne updates a Cholesky factorization in O(n²) time. The Cholesky
+// factorization computation from scratch is O(n³).
+func (c *Cholesky) SymRankOne(orig *Cholesky, alpha float64, x Vector) (ok bool) {
+	if !orig.valid() {
+		panic(badCholesky)
+	}
+	n := orig.Size()
+	if r, c := x.Dims(); r != n || c != 1 {
+		panic(ErrShape)
+	}
+	if orig != c {
+		if c.chol == nil {
+			c.chol = NewTriDense(n, Upper, nil)
+		} else if c.chol.mat.N != n {
+			panic(ErrShape)
+		}
+		c.chol.Copy(orig.chol)
+	}
+
+	if alpha == 0 {
+		return true
+	}
+
+	// Algorithms for updating and downdating the Cholesky factorization are
+	// described, for example, in
+	// - J. J. Dongarra, J. R. Bunch, C. B. Moler, G. W. Stewart: LINPACK
+	//   Users' Guide. SIAM (1979), pages 10.10--10.14
+	// or
+	// - P. E. Gill, G. H. Golub, W. Murray, and M. A. Saunders: Methods for
+	//   modifying matrix factorizations. Mathematics of Computation 28(126)
+	//   (1974), Method C3 on page 521
+	//
+	// The implementation is based on LINPACK code
+	//  http://www.netlib.org/linpack/dchud.f
+	//  http://www.netlib.org/linpack/dchdd.f
+	// and
+	//  https://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646
+	//
+	// According to http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00301.html
+	// LINPACK is released under BSD license.
+	//
+	// See also:
+	// - M. A. Saunders: Large-scale Linear Programming Using the Cholesky
+	//   Factorization. Technical Report Stanford University (1972)
+	//   http://i.stanford.edu/pub/cstr/reports/cs/tr/72/252/CS-TR-72-252.pdf
+	// - Matthias Seeger: Low rank updates for the Cholesky decomposition.
+	//   EPFL Technical Report 161468 (2004)
+	//   http://infoscience.epfl.ch/record/161468
+
+	work := getFloats(n, false)
+	defer putFloats(work)
+	var xmat blas64.Vector
+	if rv, ok := x.(RawVectorer); ok {
+		xmat = rv.RawVector()
+	} else {
+		// x does not expose its backing data, so copy it into a
+		// freshly allocated temporary vector.
+		tmp := NewVecDense(n, nil)
+		tmp.CopyVec(x)
+		xmat = tmp.RawVector()
+	}
+	blas64.Copy(n, xmat, blas64.Vector{1, work})
+
+	if alpha > 0 {
+		// Compute rank-1 update.
+		if alpha != 1 {
+			blas64.Scal(n, math.Sqrt(alpha), blas64.Vector{1, work})
+		}
+		umat := c.chol.mat
+		stride := umat.Stride
+		for i := 0; i < n; i++ {
+			// Compute parameters of the Givens matrix that zeroes
+			// the i-th element of x.
+			c, s, r, _ := blas64.Rotg(umat.Data[i*stride+i], work[i])
+			if r < 0 {
+				// Multiply by -1 to have positive diagonal
+				// elements.
+				r *= -1
+				c *= -1
+				s *= -1
+			}
+			umat.Data[i*stride+i] = r
+			if i < n-1 {
+				// Multiply the extended factorization matrix by
+				// the Givens matrix from the left. Only
+				// the i-th row and x are modified.
+				blas64.Rot(n-i-1,
+					blas64.Vector{1, umat.Data[i*stride+i+1 : i*stride+n]},
+					blas64.Vector{1, work[i+1 : n]},
+					c, s)
+			}
+		}
+		c.updateCond(-1)
+		return true
+	}
+
+	// Compute rank-1 downdate.
+	alpha = math.Sqrt(-alpha)
+	if alpha != 1 {
+		blas64.Scal(n, alpha, blas64.Vector{1, work})
+	}
+	// Solve U^T * p = x storing the result into work.
+	ok = lapack64.Trtrs(blas.Trans, c.chol.RawTriangular(), blas64.General{
+		Rows:   n,
+		Cols:   1,
+		Stride: 1,
+		Data:   work,
+	})
+	if !ok {
+		// The original matrix is singular. Should not happen, because
+		// the factorization is valid.
+		panic(badCholesky)
+	}
+	norm := blas64.Nrm2(n, blas64.Vector{1, work})
+	if norm >= 1 {
+		// The updated matrix is not positive definite.
+ return false + } + norm = math.Sqrt((1 + norm) * (1 - norm)) + cos := getFloats(n, false) + defer putFloats(cos) + sin := getFloats(n, false) + defer putFloats(sin) + for i := n - 1; i >= 0; i-- { + // Compute parameters of Givens matrices that zero elements of p + // backwards. + cos[i], sin[i], norm, _ = blas64.Rotg(norm, work[i]) + if norm < 0 { + norm *= -1 + cos[i] *= -1 + sin[i] *= -1 + } + } + umat := c.chol.mat + stride := umat.Stride + for i := n - 1; i >= 0; i-- { + work[i] = 0 + // Apply Givens matrices to U. + // TODO(vladimir-ch): Use workspace to avoid modifying the + // receiver in case an invalid factorization is created. + blas64.Rot(n-i, blas64.Vector{1, work[i:n]}, blas64.Vector{1, umat.Data[i*stride+i : i*stride+n]}, cos[i], sin[i]) + if umat.Data[i*stride+i] == 0 { + // The matrix is singular (may rarely happen due to + // floating-point effects?). + ok = false + } else if umat.Data[i*stride+i] < 0 { + // Diagonal elements should be positive. If it happens + // that on the i-th row the diagonal is negative, + // multiply U from the left by an identity matrix that + // has -1 on the i-th row. + blas64.Scal(n-i, -1, blas64.Vector{1, umat.Data[i*stride+i : i*stride+n]}) + } + } + if ok { + c.updateCond(-1) + } else { + c.Reset() + } + return ok +} + +func (c *Cholesky) valid() bool { + return c.chol != nil && !c.chol.IsZero() +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/cmatrix.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/cmatrix.go new file mode 100644 index 00000000..fa8e135b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/cmatrix.go @@ -0,0 +1,71 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +// CMatrix is the basic matrix interface type for complex matrices. +type CMatrix interface { + // Dims returns the dimensions of a Matrix. + Dims() (r, c int) + + // At returns the value of a matrix element at row i, column j. + // It will panic if i or j are out of bounds for the matrix. + At(i, j int) complex128 + + // H returns the conjugate transpose of the Matrix. Whether H + // returns a copy of the underlying data is implementation dependent. + // This method may be implemented using the Conjugate type, which + // provides an implicit matrix conjugate transpose. + H() CMatrix +} + +var ( + _ CMatrix = Conjugate{} + _ Unconjugator = Conjugate{} +) + +// Conjugate is a type for performing an implicit matrix conjugate transpose. +// It implements the Matrix interface, returning values from the conjugate +// transpose of the matrix within. +type Conjugate struct { + CMatrix CMatrix +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Matrix field. +func (t Conjugate) At(i, j int) complex128 { + z := t.CMatrix.At(j, i) + return complex(real(z), -imag(z)) +} + +// Dims returns the dimensions of the transposed matrix. The number of rows returned +// is the number of columns in the Matrix field, and the number of columns is +// the number of rows in the Matrix field. +func (t Conjugate) Dims() (r, c int) { + c, r = t.CMatrix.Dims() + return r, c +} + +// H performs an implicit conjugate transpose by returning the Matrix field. +func (t Conjugate) H() CMatrix { + return t.CMatrix +} + +// Unconjugate returns the Matrix field. 
+func (t Conjugate) Unconjugate() CMatrix { + return t.CMatrix +} + +// Unconjugator is a type that can undo an implicit conjugate transpose. +type Unconjugator interface { + // Note: This interface is needed to unify all of the Conjugate types. In + // the cmat128 methods, we need to test if the Matrix has been implicitly + // transposed. If this is checked by testing for the specific Conjugate type + // then the behavior will be different if the user uses H() or HTri() for a + // triangular matrix. + + // Unconjugate returns the underlying Matrix stored for the implicit + // conjugate transpose. + Unconjugate() CMatrix +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/consts.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/consts.go new file mode 100644 index 00000000..a7b6370d --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/consts.go @@ -0,0 +1,54 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +// TriKind represents the triangularity of the matrix. +type TriKind bool + +const ( + // Upper specifies an upper triangular matrix. + Upper TriKind = true + // Lower specifies a lower triangular matrix. + Lower TriKind = false +) + +// SVDKind specifies the treatment of singular vectors during an SVD +// factorization. +type SVDKind int + +const ( + // SVDNone specifies that no singular vectors should be computed during + // the decomposition. + SVDNone SVDKind = iota + 1 + // SVDThin computes the thin singular vectors, that is, it computes + // A = U~ * Σ * V~^T + // where U~ is of size m×min(m,n), Σ is a diagonal matrix of size min(m,n)×min(m,n) + // and V~ is of size n×min(m,n). + SVDThin + // SVDFull computes the full singular value decomposition, + // A = U * Σ * V^T + // where U is of size m×m, Σ is an m×n diagonal matrix, and V is an n×n matrix. + SVDFull +) + +// GSVDKind specifies the treatment of singular vectors during a GSVD +// factorization. +type GSVDKind int + +const ( + // GSVDU specifies that the U singular vectors should be computed during + // the decomposition. + GSVDU GSVDKind = 1 << iota + // GSVDV specifies that the V singular vectors should be computed during + // the decomposition. + GSVDV + // GSVDQ specifies that the Q singular vectors should be computed during + // the decomposition. + GSVDQ + + // GSVDNone specifies that no singular vector should be computed during + // the decomposition. + GSVDNone +) diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/dense.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/dense.go new file mode 100644 index 00000000..fbcac756 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/dense.go @@ -0,0 +1,533 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + dense *Dense + + _ Matrix = dense + _ Mutable = dense + + _ Cloner = dense + _ RowViewer = dense + _ ColViewer = dense + _ RawRowViewer = dense + _ Grower = dense + + _ RawMatrixSetter = dense + _ RawMatrixer = dense + + _ Reseter = dense +) + +// Dense is a dense matrix representation. 
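+//
+// For illustration (an editorial sketch, not part of the original source),
+// the 2×3 matrix
+//	[1 2 3]
+//	[4 5 6]
+// is constructed from its row-major backing slice as
+//	m := NewDense(2, 3, []float64{
+//		1, 2, 3,
+//		4, 5, 6,
+//	})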
+type Dense struct {
+	mat blas64.General
+
+	capRows, capCols int
+}
+
+// NewDense creates a new Dense matrix with r rows and c columns. If data == nil,
+// a new slice is allocated for the backing slice. If len(data) == r*c, data is
+// used as the backing slice, and changes to the elements of the returned Dense
+// will be reflected in data. If neither of these is true, NewDense will panic.
+//
+// The data must be arranged in row-major order, i.e. the (i*c + j)-th
+// element in the data slice is the {i, j}-th element in the matrix.
+func NewDense(r, c int, data []float64) *Dense {
+	if r < 0 || c < 0 {
+		panic("mat: negative dimension")
+	}
+	if data != nil && r*c != len(data) {
+		panic(ErrShape)
+	}
+	if data == nil {
+		data = make([]float64, r*c)
+	}
+	return &Dense{
+		mat: blas64.General{
+			Rows:   r,
+			Cols:   c,
+			Stride: c,
+			Data:   data,
+		},
+		capRows: r,
+		capCols: c,
+	}
+}
+
+// reuseAs resizes an empty matrix to an r×c matrix,
+// or checks that a non-empty matrix is r×c.
+//
+// reuseAs must be kept in sync with reuseAsZeroed.
+func (m *Dense) reuseAs(r, c int) {
+	if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols {
+		// Panic as a string, not a mat.Error.
+		panic("mat: caps not correctly set")
+	}
+	if r == 0 || c == 0 {
+		panic(ErrZeroLength)
+	}
+	if m.IsZero() {
+		m.mat = blas64.General{
+			Rows:   r,
+			Cols:   c,
+			Stride: c,
+			Data:   use(m.mat.Data, r*c),
+		}
+		m.capRows = r
+		m.capCols = c
+		return
+	}
+	if r != m.mat.Rows || c != m.mat.Cols {
+		panic(ErrShape)
+	}
+}
+
+// reuseAsZeroed resizes an empty matrix to an r×c matrix,
+// or checks that a non-empty matrix is r×c. It zeroes
+// all the elements of the matrix.
+//
+// reuseAsZeroed must be kept in sync with reuseAs.
+func (m *Dense) reuseAsZeroed(r, c int) {
+	if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols {
+		// Panic as a string, not a mat.Error.
+		panic("mat: caps not correctly set")
+	}
+	if r == 0 || c == 0 {
+		panic(ErrZeroLength)
+	}
+	if m.IsZero() {
+		m.mat = blas64.General{
+			Rows:   r,
+			Cols:   c,
+			Stride: c,
+			Data:   useZeroed(m.mat.Data, r*c),
+		}
+		m.capRows = r
+		m.capCols = c
+		return
+	}
+	if r != m.mat.Rows || c != m.mat.Cols {
+		panic(ErrShape)
+	}
+	for i := 0; i < r; i++ {
+		zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c])
+	}
+}
+
+// untranspose untransposes a matrix if applicable. If a is an Untransposer, then
+// untranspose returns the underlying matrix and true. If it is not, then it returns
+// the input matrix and false.
+func untranspose(a Matrix) (Matrix, bool) {
+	if ut, ok := a.(Untransposer); ok {
+		return ut.Untranspose(), true
+	}
+	return a, false
+}
+
+// isolatedWorkspace returns a new dense matrix w with the size of a and
+// returns a callback to defer which performs cleanup at the return of the call.
+// This should be used when a method receiver is the same pointer as an input argument.
+func (m *Dense) isolatedWorkspace(a Matrix) (w *Dense, restore func()) {
+	r, c := a.Dims()
+	if r == 0 || c == 0 {
+		panic(ErrZeroLength)
+	}
+	w = getWorkspace(r, c, false)
+	return w, func() {
+		m.Copy(w)
+		putWorkspace(w)
+	}
+}
+
+// Reset zeros the dimensions of the matrix so that it can be reused as the
+// receiver of a dimensionally restricted operation.
+//
+// See the Reseter interface for more information.
+func (m *Dense) Reset() {
+	// Rows, Cols and Stride must be zeroed in unison.
+	m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0
+	m.capRows, m.capCols = 0, 0
+	m.mat.Data = m.mat.Data[:0]
+}
+
+// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the
+// receiver for size-restricted operations. Dense matrices can be zeroed using Reset.
+func (m *Dense) IsZero() bool {
+	// It must be the case that m.Dims() returns
+	// zeros in this case. See comment in Reset().
+	return m.mat.Stride == 0
+}
+
+// asTriDense returns a TriDense with the given size and side. The backing data
+// of the TriDense is the same as the receiver.
+func (m *Dense) asTriDense(n int, diag blas.Diag, uplo blas.Uplo) *TriDense {
+	return &TriDense{
+		mat: blas64.Triangular{
+			N:      n,
+			Stride: m.mat.Stride,
+			Data:   m.mat.Data,
+			Uplo:   uplo,
+			Diag:   diag,
+		},
+		cap: n,
+	}
+}
+
+// DenseCopyOf returns a newly allocated copy of the elements of a.
+func DenseCopyOf(a Matrix) *Dense {
+	d := &Dense{}
+	d.Clone(a)
+	return d
+}
+
+// SetRawMatrix sets the underlying blas64.General used by the receiver.
+// Changes to elements in the receiver following the call will be reflected
+// in b.
+func (m *Dense) SetRawMatrix(b blas64.General) {
+	m.capRows, m.capCols = b.Rows, b.Cols
+	m.mat = b
+}
+
+// RawMatrix returns the underlying blas64.General used by the receiver.
+// Changes to elements in the receiver following the call will be reflected
+// in returned blas64.General.
+func (m *Dense) RawMatrix() blas64.General { return m.mat }
+
+// Dims returns the number of rows and columns in the matrix.
+func (m *Dense) Dims() (r, c int) { return m.mat.Rows, m.mat.Cols }
+
+// Caps returns the number of rows and columns in the backing matrix.
+func (m *Dense) Caps() (r, c int) { return m.capRows, m.capCols }
+
+// T performs an implicit transpose by returning the receiver inside a Transpose.
+func (m *Dense) T() Matrix {
+	return Transpose{m}
+}
+
+// ColView returns a Vector reflecting the column j, backed by the matrix data.
+//
+// See ColViewer for more information.
+func (m *Dense) ColView(j int) Vector {
+	var v VecDense
+	v.ColViewOf(m, j)
+	return &v
+}
+
+// SetCol sets the values in the specified column of the matrix to the values
+// in src. len(src) must equal the number of rows in the receiver.
+func (m *Dense) SetCol(j int, src []float64) {
+	if j >= m.mat.Cols || j < 0 {
+		panic(ErrColAccess)
+	}
+	if len(src) != m.mat.Rows {
+		panic(ErrColLength)
+	}
+
+	blas64.Copy(m.mat.Rows,
+		blas64.Vector{Inc: 1, Data: src},
+		blas64.Vector{Inc: m.mat.Stride, Data: m.mat.Data[j:]},
+	)
+}
+
+// SetRow sets the values in the specified row of the matrix to the values
+// in src. len(src) must equal the number of columns in the receiver.
+func (m *Dense) SetRow(i int, src []float64) {
+	if i >= m.mat.Rows || i < 0 {
+		panic(ErrRowAccess)
+	}
+	if len(src) != m.mat.Cols {
+		panic(ErrRowLength)
+	}
+
+	copy(m.rawRowView(i), src)
+}
+
+// RowView returns row i of the matrix data represented as a column vector,
+// backed by the matrix data.
+//
+// See RowViewer for more information.
+func (m *Dense) RowView(i int) Vector {
+	var v VecDense
+	v.RowViewOf(m, i)
+	return &v
+}
+
+// RawRowView returns a slice backed by the same array as backing the
+// receiver.
+func (m *Dense) RawRowView(i int) []float64 {
+	if i >= m.mat.Rows || i < 0 {
+		panic(ErrRowAccess)
+	}
+	return m.rawRowView(i)
+}
+
+func (m *Dense) rawRowView(i int) []float64 {
+	return m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+m.mat.Cols]
+}
+
+// Slice returns a new Matrix that shares backing data with the receiver.
+// The returned matrix starts at {i,j} of the receiver and extends k-i rows
+// and l-j columns.
The final row in the resulting matrix is k-1 and the +// final column is l-1. +// Slice panics with ErrIndexOutOfRange if the slice is outside the capacity +// of the receiver. +func (m *Dense) Slice(i, k, j, l int) Matrix { + mr, mc := m.Caps() + if i < 0 || mr <= i || j < 0 || mc <= j || k <= i || mr < k || l <= j || mc < l { + panic(ErrIndexOutOfRange) + } + t := *m + t.mat.Data = t.mat.Data[i*t.mat.Stride+j : (k-1)*t.mat.Stride+l] + t.mat.Rows = k - i + t.mat.Cols = l - j + t.capRows -= i + t.capCols -= j + return &t +} + +// Grow returns the receiver expanded by r rows and c columns. If the dimensions +// of the expanded matrix are outside the capacities of the receiver a new +// allocation is made, otherwise not. Note the receiver itself is not modified +// during the call to Grow. +func (m *Dense) Grow(r, c int) Matrix { + if r < 0 || c < 0 { + panic(ErrIndexOutOfRange) + } + if r == 0 && c == 0 { + return m + } + + r += m.mat.Rows + c += m.mat.Cols + + var t Dense + switch { + case m.mat.Rows == 0 || m.mat.Cols == 0: + t.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + // We zero because we don't know how the matrix will be used. + // In other places, the mat is immediately filled with a result; + // this is not the case here. + Data: useZeroed(m.mat.Data, r*c), + } + case r > m.capRows || c > m.capCols: + cr := max(r, m.capRows) + cc := max(c, m.capCols) + t.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: cc, + Data: make([]float64, cr*cc), + } + t.capRows = cr + t.capCols = cc + // Copy the complete matrix over to the new matrix. + // Including elements not currently visible. Use a temporary structure + // to avoid modifying the receiver. + var tmp Dense + tmp.mat = blas64.General{ + Rows: m.mat.Rows, + Cols: m.mat.Cols, + Stride: m.mat.Stride, + Data: m.mat.Data, + } + tmp.capRows = m.capRows + tmp.capCols = m.capCols + t.Copy(&tmp) + return &t + default: + t.mat = blas64.General{ + Data: m.mat.Data[:(r-1)*m.mat.Stride+c], + Rows: r, + Cols: c, + Stride: m.mat.Stride, + } + } + t.capRows = r + t.capCols = c + return &t +} + +// Clone makes a copy of a into the receiver, overwriting the previous value of +// the receiver. The clone operation does not make any restriction on shape and +// will not cause shadowing. +// +// See the Cloner interface for more information. +func (m *Dense) Clone(a Matrix) { + r, c := a.Dims() + mat := blas64.General{ + Rows: r, + Cols: c, + Stride: c, + } + m.capRows, m.capCols = r, c + + aU, trans := untranspose(a) + switch aU := aU.(type) { + case RawMatrixer: + amat := aU.RawMatrix() + mat.Data = make([]float64, r*c) + if trans { + for i := 0; i < r; i++ { + blas64.Copy(c, + blas64.Vector{Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, + blas64.Vector{Inc: 1, Data: mat.Data[i*c : (i+1)*c]}) + } + } else { + for i := 0; i < r; i++ { + copy(mat.Data[i*c:(i+1)*c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + } + case *VecDense: + amat := aU.mat + mat.Data = make([]float64, aU.n) + blas64.Copy(aU.n, + blas64.Vector{Inc: amat.Inc, Data: amat.Data}, + blas64.Vector{Inc: 1, Data: mat.Data}) + default: + mat.Data = make([]float64, r*c) + w := *m + w.mat = mat + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + w.set(i, j, a.At(i, j)) + } + } + *m = w + return + } + m.mat = mat +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. 
If a aliases the receiver +// and is a transposed Dense or VecDense, with a non-unitary increment, Copy will +// panic. +// +// See the Copier interface for more information. +func (m *Dense) Copy(a Matrix) (r, c int) { + r, c = a.Dims() + if a == m { + return r, c + } + r = min(r, m.mat.Rows) + c = min(c, m.mat.Cols) + if r == 0 || c == 0 { + return 0, 0 + } + + aU, trans := untranspose(a) + switch aU := aU.(type) { + case RawMatrixer: + amat := aU.RawMatrix() + if trans { + if amat.Stride != 1 { + m.checkOverlap(amat) + } + for i := 0; i < r; i++ { + blas64.Copy(c, + blas64.Vector{Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, + blas64.Vector{Inc: 1, Data: m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]}) + } + } else { + switch o := offset(m.mat.Data, amat.Data); { + case o < 0: + for i := r - 1; i >= 0; i-- { + copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + case o > 0: + for i := 0; i < r; i++ { + copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + default: + // Nothing to do. + } + } + case *VecDense: + var n, stride int + amat := aU.mat + if trans { + if amat.Inc != 1 { + m.checkOverlap(aU.asGeneral()) + } + n = c + stride = 1 + } else { + n = r + stride = m.mat.Stride + } + if amat.Inc == 1 && stride == 1 { + copy(m.mat.Data, amat.Data[:n]) + break + } + switch o := offset(m.mat.Data, amat.Data); { + case o < 0: + blas64.Copy(n, + blas64.Vector{Inc: -amat.Inc, Data: amat.Data}, + blas64.Vector{Inc: -stride, Data: m.mat.Data}) + case o > 0: + blas64.Copy(n, + blas64.Vector{Inc: amat.Inc, Data: amat.Data}, + blas64.Vector{Inc: stride, Data: m.mat.Data}) + default: + // Nothing to do. + } + default: + m.checkOverlapMatrix(aU) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, a.At(i, j)) + } + } + } + + return r, c +} + +// Stack appends the rows of b onto the rows of a, placing the result into the +// receiver with b placed in the greater indexed rows. Stack will panic if the +// two input matrices do not have the same number of columns or the constructed +// stacked matrix is not the same shape as the receiver. +func (m *Dense) Stack(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ac != bc || m == a || m == b { + panic(ErrShape) + } + + m.reuseAs(ar+br, ac) + + m.Copy(a) + w := m.Slice(ar, ar+br, 0, bc).(*Dense) + w.Copy(b) +} + +// Augment creates the augmented matrix of a and b, where b is placed in the +// greater indexed columns. Augment will panic if the two input matrices do +// not have the same number of rows or the constructed augmented matrix is +// not the same shape as the receiver. +func (m *Dense) Augment(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || m == a || m == b { + panic(ErrShape) + } + + m.reuseAs(ar, ac+bc) + + m.Copy(a) + w := m.Slice(0, br, ac, ac+bc).(*Dense) + w.Copy(b) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go new file mode 100644 index 00000000..e909a6a1 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go @@ -0,0 +1,885 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Add adds a and b element-wise, placing the result in the receiver. Add +// will panic if the two matrices do not have the same shape. +func (m *Dense) Add(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v + bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)+b.At(r, c)) + } + } +} + +// Sub subtracts the matrix b from a, placing the result in the receiver. Sub +// will panic if the two matrices do not have the same shape. +func (m *Dense) Sub(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v - bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)-b.At(r, c)) + } + } +} + +// MulElem performs element-wise multiplication of a and b, placing the result +// in the receiver. MulElem will panic if the two matrices do not have the same +// shape. 
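+//
+// For example, a minimal sketch of use:
+//	a := mat.NewDense(2, 2, []float64{1, 2, 3, 4})
+//	b := mat.NewDense(2, 2, []float64{5, 6, 7, 8})
+//	var c mat.Dense
+//	c.MulElem(a, b) // c is now [[5, 12], [21, 32]].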
+func (m *Dense) MulElem(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v * bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)*b.At(r, c)) + } + } +} + +// DivElem performs element-wise division of a by b, placing the result +// in the receiver. DivElem will panic if the two matrices do not have the same +// shape. +func (m *Dense) DivElem(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v / bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)/b.At(r, c)) + } + } +} + +// Inverse computes the inverse of the matrix a, storing the result into the +// receiver. If a is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally +// be avoided where possible, for example by using the Solve routines. +func (m *Dense) Inverse(a Matrix) error { + // TODO(btracey): Special case for RawTriangular, etc. + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + m.reuseAs(a.Dims()) + aU, aTrans := untranspose(a) + switch rm := aU.(type) { + case RawMatrixer: + if m != aU || aTrans { + if m == aU || m.checkOverlap(rm.RawMatrix()) { + tmp := getWorkspace(r, c, false) + tmp.Copy(a) + m.Copy(tmp) + putWorkspace(tmp) + break + } + m.Copy(a) + } + default: + m.Copy(a) + } + ipiv := getInts(r, false) + defer putInts(ipiv) + ok := lapack64.Getrf(m.mat, ipiv) + if !ok { + return Condition(math.Inf(1)) + } + work := getFloats(4*r, false) // must be at least 4*r for cond. 
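+	// The call below with a work length of -1 is a LAPACK workspace
+	// query: it performs no computation and instead stores the optimal
+	// workspace length in work[0].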
+ lapack64.Getri(m.mat, ipiv, work, -1) + if int(work[0]) > 4*r { + l := int(work[0]) + putFloats(work) + work = getFloats(l, false) + } else { + work = work[:4*r] + } + defer putFloats(work) + lapack64.Getri(m.mat, ipiv, work, len(work)) + norm := lapack64.Lange(CondNorm, m.mat, work) + rcond := lapack64.Gecon(CondNorm, m.mat, norm, work, ipiv) // reuse ipiv + if rcond == 0 { + return Condition(math.Inf(1)) + } + cond := 1 / rcond + if cond > ConditionTolerance { + return Condition(cond) + } + return nil +} + +// Mul takes the matrix product of a and b, placing the result in the receiver. +// If the number of columns in a does not equal the number of rows in b, Mul will panic. +func (m *Dense) Mul(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + + if ac != br { + panic(ErrShape) + } + + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + m.reuseAs(ar, bc) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + aT := blas.NoTrans + if aTrans { + aT = blas.Trans + } + bT := blas.NoTrans + if bTrans { + bT = blas.Trans + } + + // Some of the cases do not have a transpose option, so create + // temporary memory. + // C = A^T * B = (B^T * A)^T + // C^T = B^T * A. + if aUrm, ok := aU.(RawMatrixer); ok { + amat := aUrm.RawMatrix() + if restore == nil { + m.checkOverlap(amat) + } + if bUrm, ok := bU.(RawMatrixer); ok { + bmat := bUrm.RawMatrix() + if restore == nil { + m.checkOverlap(bmat) + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + if bU, ok := bU.(RawSymmetricer); ok { + bmat := bU.RawSymmetric() + if aTrans { + c := getWorkspace(ac, ar, false) + blas64.Symm(blas.Left, 1, bmat, amat, 0, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + blas64.Symm(blas.Right, 1, bmat, amat, 0, m.mat) + return + } + if bU, ok := bU.(RawTriangular); ok { + // Trmm updates in place, so copy aU first. + bmat := bU.RawTriangular() + if aTrans { + c := getWorkspace(ac, ar, false) + var tmp Dense + tmp.SetRawMatrix(amat) + c.Copy(&tmp) + bT := blas.Trans + if bTrans { + bT = blas.NoTrans + } + blas64.Trmm(blas.Left, bT, 1, bmat, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + m.Copy(a) + blas64.Trmm(blas.Right, bT, 1, bmat, m.mat) + return + } + if bU, ok := bU.(*VecDense); ok { + m.checkOverlap(bU.asGeneral()) + bvec := bU.RawVector() + if bTrans { + // {ar,1} x {1,bc}, which is not a vector. + // Instead, construct B as a General. + bmat := blas64.General{ + Rows: bc, + Cols: 1, + Stride: bvec.Inc, + Data: bvec.Data, + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + cvec := blas64.Vector{ + Inc: m.mat.Stride, + Data: m.mat.Data, + } + blas64.Gemv(aT, 1, amat, bvec, 0, cvec) + return + } + } + if bUrm, ok := bU.(RawMatrixer); ok { + bmat := bUrm.RawMatrix() + if restore == nil { + m.checkOverlap(bmat) + } + if aU, ok := aU.(RawSymmetricer); ok { + amat := aU.RawSymmetric() + if bTrans { + c := getWorkspace(bc, br, false) + blas64.Symm(blas.Right, 1, amat, bmat, 0, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + blas64.Symm(blas.Left, 1, amat, bmat, 0, m.mat) + return + } + if aU, ok := aU.(RawTriangular); ok { + // Trmm updates in place, so copy bU first. 
+ amat := aU.RawTriangular() + if bTrans { + c := getWorkspace(bc, br, false) + var tmp Dense + tmp.SetRawMatrix(bmat) + c.Copy(&tmp) + aT := blas.Trans + if aTrans { + aT = blas.NoTrans + } + blas64.Trmm(blas.Right, aT, 1, amat, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + m.Copy(b) + blas64.Trmm(blas.Left, aT, 1, amat, m.mat) + return + } + if aU, ok := aU.(*VecDense); ok { + m.checkOverlap(aU.asGeneral()) + avec := aU.RawVector() + if aTrans { + // {1,ac} x {ac, bc} + // Transpose B so that the vector is on the right. + cvec := blas64.Vector{ + Inc: 1, + Data: m.mat.Data, + } + bT := blas.Trans + if bTrans { + bT = blas.NoTrans + } + blas64.Gemv(bT, 1, bmat, avec, 0, cvec) + return + } + // {ar,1} x {1,bc} which is not a vector result. + // Instead, construct A as a General. + amat := blas64.General{ + Rows: ar, + Cols: 1, + Stride: avec.Inc, + Data: avec.Data, + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + row := getFloats(ac, false) + defer putFloats(row) + for r := 0; r < ar; r++ { + for i := range row { + row[i] = a.At(r, i) + } + for c := 0; c < bc; c++ { + var v float64 + for i, e := range row { + v += e * b.At(i, c) + } + m.mat.Data[r*m.mat.Stride+c] = v + } + } +} + +// strictCopy copies a into m panicking if the shape of a and m differ. +func strictCopy(m *Dense, a Matrix) { + r, c := m.Copy(a) + if r != m.mat.Rows || c != m.mat.Cols { + // Panic with a string since this + // is not a user-facing panic. + panic(ErrShape.Error()) + } +} + +// Exp calculates the exponential of the matrix a, e^a, placing the result +// in the receiver. Exp will panic with matrix.ErrShape if a is not square. +func (m *Dense) Exp(a Matrix) { + // The implementation used here is from Functions of Matrices: Theory and Computation + // Chapter 10, Algorithm 10.20. https://doi.org/10.1137/1.9780898717778.ch10 + + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + + m.reuseAs(r, r) + if r == 1 { + m.mat.Data[0] = math.Exp(a.At(0, 0)) + return + } + + pade := []struct { + theta float64 + b []float64 + }{ + {theta: 0.015, b: []float64{ + 120, 60, 12, 1, + }}, + {theta: 0.25, b: []float64{ + 30240, 15120, 3360, 420, 30, 1, + }}, + {theta: 0.95, b: []float64{ + 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1, + }}, + {theta: 2.1, b: []float64{ + 17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1, + }}, + } + + a1 := m + a1.Copy(a) + v := getWorkspace(r, r, true) + vraw := v.RawMatrix() + vvec := blas64.Vector{Inc: 1, Data: vraw.Data} + defer putWorkspace(v) + + u := getWorkspace(r, r, true) + uraw := u.RawMatrix() + uvec := blas64.Vector{Inc: 1, Data: uraw.Data} + defer putWorkspace(u) + + a2 := getWorkspace(r, r, false) + defer putWorkspace(a2) + + n1 := Norm(a, 1) + for i, t := range pade { + if n1 > t.theta { + continue + } + + // This loop only executes once, so + // this is not as horrible as it looks. + p := getWorkspace(r, r, true) + praw := p.RawMatrix() + pvec := blas64.Vector{Inc: 1, Data: praw.Data} + defer putWorkspace(p) + + for k := 0; k < r; k++ { + p.set(k, k, 1) + v.set(k, k, t.b[0]) + u.set(k, k, t.b[1]) + } + + a2.Mul(a1, a1) + for j := 0; j <= i; j++ { + p.Mul(p, a2) + blas64.Axpy(r*r, t.b[2*j+2], pvec, vvec) + blas64.Axpy(r*r, t.b[2*j+3], pvec, uvec) + } + u.Mul(a1, u) + + // Use p as a workspace here and + // rename u for the second call's + // receiver. 
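+	// m.Solve below computes m from (V - U) * m = (V + U), giving the
+	// Padé approximant to e^A.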
+ vmu, vpu := u, p + vpu.Add(v, u) + vmu.Sub(v, u) + + m.Solve(vmu, vpu) + return + } + + // Remaining Padé table line. + const theta13 = 5.4 + b := [...]float64{ + 64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, + 129060195264000, 10559470521600, 670442572800, 33522128640, + 1323241920, 40840800, 960960, 16380, 182, 1, + } + + s := math.Log2(n1 / theta13) + if s >= 0 { + s = math.Ceil(s) + a1.Scale(1/math.Pow(2, s), a1) + } + a2.Mul(a1, a1) + + i := getWorkspace(r, r, true) + for j := 0; j < r; j++ { + i.set(j, j, 1) + } + iraw := i.RawMatrix() + ivec := blas64.Vector{Inc: 1, Data: iraw.Data} + defer putWorkspace(i) + + a2raw := a2.RawMatrix() + a2vec := blas64.Vector{Inc: 1, Data: a2raw.Data} + + a4 := getWorkspace(r, r, false) + a4raw := a4.RawMatrix() + a4vec := blas64.Vector{Inc: 1, Data: a4raw.Data} + defer putWorkspace(a4) + a4.Mul(a2, a2) + + a6 := getWorkspace(r, r, false) + a6raw := a6.RawMatrix() + a6vec := blas64.Vector{Inc: 1, Data: a6raw.Data} + defer putWorkspace(a6) + a6.Mul(a2, a4) + + // V = A_6(b_12*A_6 + b_10*A_4 + b_8*A_2) + b_6*A_6 + b_4*A_4 + b_2*A_2 + b_0*I + blas64.Axpy(r*r, b[12], a6vec, vvec) + blas64.Axpy(r*r, b[10], a4vec, vvec) + blas64.Axpy(r*r, b[8], a2vec, vvec) + v.Mul(v, a6) + blas64.Axpy(r*r, b[6], a6vec, vvec) + blas64.Axpy(r*r, b[4], a4vec, vvec) + blas64.Axpy(r*r, b[2], a2vec, vvec) + blas64.Axpy(r*r, b[0], ivec, vvec) + + // U = A(A_6(b_13*A_6 + b_11*A_4 + b_9*A_2) + b_7*A_6 + b_5*A_4 + b_3*A_2 + b_1*I) + blas64.Axpy(r*r, b[13], a6vec, uvec) + blas64.Axpy(r*r, b[11], a4vec, uvec) + blas64.Axpy(r*r, b[9], a2vec, uvec) + u.Mul(u, a6) + blas64.Axpy(r*r, b[7], a6vec, uvec) + blas64.Axpy(r*r, b[5], a4vec, uvec) + blas64.Axpy(r*r, b[3], a2vec, uvec) + blas64.Axpy(r*r, b[1], ivec, uvec) + u.Mul(u, a1) + + // Use i as a workspace here and + // rename u for the second call's + // receiver. + vmu, vpu := u, i + vpu.Add(v, u) + vmu.Sub(v, u) + + m.Solve(vmu, vpu) + + for ; s > 0; s-- { + m.Mul(m, m) + } +} + +// Pow calculates the integral power of the matrix a to n, placing the result +// in the receiver. Pow will panic if n is negative or if a is not square. +func (m *Dense) Pow(a Matrix, n int) { + if n < 0 { + panic("matrix: illegal power") + } + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + + m.reuseAs(r, c) + + // Take possible fast paths. + switch n { + case 0: + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + m.mat.Data[i*m.mat.Stride+i] = 1 + } + return + case 1: + m.Copy(a) + return + case 2: + m.Mul(a, a) + return + } + + // Perform iterative exponentiation by squaring in work space. + w := getWorkspace(r, r, false) + w.Copy(a) + s := getWorkspace(r, r, false) + s.Copy(a) + x := getWorkspace(r, r, false) + for n--; n > 0; n >>= 1 { + if n&1 != 0 { + x.Mul(w, s) + w, x = x, w + } + if n != 1 { + x.Mul(s, s) + s, x = x, s + } + } + m.Copy(w) + putWorkspace(w) + putWorkspace(s) + putWorkspace(x) +} + +// Scale multiplies the elements of a by f, placing the result in the receiver. +// +// See the Scaler interface for more information.
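+//
+// For example, assuming a is an existing Matrix:
+//	var b mat.Dense
+//	b.Scale(2, a) // b now holds every element of a multiplied by 2.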
+func (m *Dense) Scale(f float64, a Matrix) { + ar, ac := a.Dims() + + m.reuseAs(ar, ac) + + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + amat := rm.RawMatrix() + if m == aU || m.checkOverlap(amat) { + var restore func() + m, restore = m.isolatedWorkspace(a) + defer restore() + } + if !aTrans { + for ja, jm := 0, 0; ja < ar*amat.Stride; ja, jm = ja+amat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v * f + } + } + } else { + for ja, jm := 0, 0; ja < ac*amat.Stride; ja, jm = ja+amat.Stride, jm+1 { + for i, v := range amat.Data[ja : ja+ar] { + m.mat.Data[i*m.mat.Stride+jm] = v * f + } + } + } + return + } + + m.checkOverlapMatrix(a) + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, f*a.At(r, c)) + } + } +} + +// Apply applies the function fn to each of the elements of a, placing the +// resulting matrix in the receiver. The function fn takes a row/column +// index and element value and returns some function of that tuple. +func (m *Dense) Apply(fn func(i, j int, v float64) float64, a Matrix) { + ar, ac := a.Dims() + + m.reuseAs(ar, ac) + + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + amat := rm.RawMatrix() + if m == aU || m.checkOverlap(amat) { + var restore func() + m, restore = m.isolatedWorkspace(a) + defer restore() + } + if !aTrans { + for j, ja, jm := 0, 0, 0; ja < ar*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = fn(j, i, v) + } + } + } else { + for j, ja, jm := 0, 0, 0; ja < ac*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+1 { + for i, v := range amat.Data[ja : ja+ar] { + m.mat.Data[i*m.mat.Stride+jm] = fn(i, j, v) + } + } + } + return + } + + m.checkOverlapMatrix(a) + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, fn(r, c, a.At(r, c))) + } + } +} + +// RankOne performs a rank-one update to the matrix a and stores the result +// in the receiver. If a is zero, see Outer. +// m = a + alpha * x * y' +func (m *Dense) RankOne(a Matrix, alpha float64, x, y Vector) { + ar, ac := a.Dims() + xr, xc := x.Dims() + if xr != ar || xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yr != ac || yc != 1 { + panic(ErrShape) + } + + if a != m { + aU, _ := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m.checkOverlap(rm.RawMatrix()) + } + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + m.checkOverlap((&VecDense{mat: xmat, n: x.Len()}).asGeneral()) + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + m.checkOverlap((&VecDense{mat: ymat, n: y.Len()}).asGeneral()) + } else { + fast = false + } + + if fast { + if m != a { + m.reuseAs(ar, ac) + m.Copy(a) + } + blas64.Ger(alpha, xmat, ymat, m.mat) + return + } + + m.reuseAs(ar, ac) + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + m.set(i, j, a.At(i, j)+alpha*x.AtVec(i)*y.AtVec(j)) + } + } +} + +// Outer calculates the outer product of the column vectors x and y, +// and stores the result in the receiver. +// m = alpha * x * y' +// In order to update an existing matrix, see RankOne. 
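+//
+// For example, assuming x and y are Vectors of lengths 3 and 2:
+//	var m mat.Dense
+//	m.Outer(1, x, y) // m is the 3×2 matrix with m_{ij} = x_i * y_j.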
+func (m *Dense) Outer(alpha float64, x, y Vector) { + xr, xc := x.Dims() + if xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yc != 1 { + panic(ErrShape) + } + + r := xr + c := yr + + // Copied from reuseAs with use replaced by useZeroed + // and a final zero of the matrix elements if we pass + // the shape checks. + // TODO(kortschak): Factor out into reuseZeroedAs if + // we find another case that needs it. + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if m.IsZero() { + m.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useZeroed(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + } else if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + m.checkOverlap((&VecDense{mat: xmat, n: x.Len()}).asGeneral()) + + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + m.checkOverlap((&VecDense{mat: ymat, n: y.Len()}).asGeneral()) + } else { + fast = false + } + + if fast { + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + } + blas64.Ger(alpha, xmat, ymat, m.mat) + return + } + + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, alpha*x.AtVec(i)*y.AtVec(j)) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/doc.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/doc.go new file mode 100644 index 00000000..73d646bc --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/doc.go @@ -0,0 +1,169 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mat provides implementations of float64 and complex128 matrix +// structures and linear algebra operations on them. +// +// Overview +// +// This section provides a quick overview of the mat package. The following +// sections provide more in depth commentary. +// +// mat provides: +// - Interfaces for Matrix classes (Matrix, Symmetric, Triangular) +// - Concrete implementations (Dense, SymDense, TriDense) +// - Methods and functions for using matrix data (Add, Trace, SymRankOne) +// - Types for constructing and using matrix factorizations (QR, LU) +// - The complementary types for complex matrices, CMatrix, CSymDense, etc. +// +// A matrix may be constructed through the corresponding New function. If no +// backing array is provided the matrix will be initialized to all zeros. +// // Allocate a zeroed real matrix of size 3×5 +// zero := mat.NewDense(3, 5, nil) +// If a backing data slice is provided, the matrix will have those elements. +// Matrices are all stored in row-major format. +// // Generate a 6×6 matrix of random values. +// data := make([]float64, 36) +// for i := range data { +// data[i] = rand.NormFloat64() +// } +// a := mat.NewDense(6, 6, data) +// Operations involving matrix data are implemented as functions when the values +// of the matrix remain unchanged +// tr := mat.Trace(a) +// and are implemented as methods when the operation modifies the receiver. +// zero.Copy(a) +// +// Receivers must be the correct size for the matrix operations, otherwise the +// operation will panic. 
As a special case for convenience, a zero-value matrix +// will be modified to have the correct size, allocating data if necessary. +// var c mat.Dense // construct a new zero-sized matrix +// c.Mul(a, a) // c is automatically adjusted to be 6×6 +// +// Zero-value of a matrix +// +// A zero-value matrix is either the Go language definition of a zero-value or +// is a zero-sized matrix with zero-length stride. Matrix implementations may have +// a Reset method to revert the receiver into a zero-valued matrix and an IsZero +// method that returns whether the matrix is zero-valued. +// So the following will all result in a zero-value matrix. +// - var a mat.Dense +// - a := NewDense(0, 0, make([]float64, 0, 100)) +// - a.Reset() +// A zero-value matrix cannot be sliced even if it does have an adequately sized +// backing data slice, but can be expanded using its Grow method if it exists. +// +// The Matrix Interfaces +// +// The Matrix interface is the common link between the concrete types of real +// matrices. The Matrix interface is defined by three functions: Dims, which +// returns the dimensions of the Matrix, At, which returns the element in the +// specified location, and T for returning a Transpose (discussed later). All of +// the concrete types can perform these behaviors and so implement the interface. +// Methods and functions are designed to use this interface, so in particular the method +// func (m *Dense) Mul(a, b Matrix) +// constructs a *Dense from the result of a multiplication with any Matrix types, +// not just *Dense. Where more restrictive requirements must be met, there are also the +// Symmetric and Triangular interfaces. For example, in +// func (s *SymDense) AddSym(a, b Symmetric) +// the Symmetric interface guarantees a symmetric result. +// +// The CMatrix interface plays the same role for complex matrices. The difference +// is that the CMatrix type has the H method instead of T, for returning the conjugate +// transpose. +// +// (Conjugate) Transposes +// +// The T method is used for transposition on real matrices, and H is used for +// conjugate transposition on complex matrices. For example, c.Mul(a.T(), b) computes +// c = a^T * b. The mat types implement this method implicitly; +// see the Transpose and Conjugate types for more details. Note that some +// operations have a transpose as part of their definition, as in *SymDense.SymOuterK. +// +// Matrix Factorization +// +// Matrix factorizations, such as the LU decomposition, typically have their own +// specific data storage, and so are each implemented as a specific type. The +// factorization can be computed through a call to Factorize +// var lu mat.LU +// lu.Factorize(a) +// The elements of the factorization can be extracted through methods on the +// factorized type, i.e. *LU.UTo. The factorization types can also be used directly, +// as in *Dense.SolveCholesky. Some factorizations can be updated directly, +// without needing to update the original matrix and refactorize, +// as in *LU.RankOne. +// +// BLAS and LAPACK +// +// BLAS and LAPACK are the standard APIs for linear algebra routines. Many +// operations in mat are implemented using calls to the wrapper functions +// in gonum/blas/blas64 and gonum/lapack/lapack64 and their complex equivalents. +// By default, blas64 and lapack64 call the native Go implementations of the +// routines. Alternatively, it is possible to use C-based implementations of the +// APIs through the respective cgo packages and "Use" functions.
The Go +// implementation of LAPACK (used by default) makes calls +// through blas64, so if a cgo BLAS implementation is registered, the lapack64 +// calls will be partially executed in Go and partially executed in C. +// +// Type Switching +// +// The Matrix abstraction enables efficiency as well as interoperability. Go's +// type reflection capabilities are used to choose the most efficient routine +// given the specific concrete types. For example, in +// c.Mul(a, b) +// if a and b both implement RawMatrixer, that is, they can be represented as a +// blas64.General, blas64.Gemm (general matrix multiplication) is called, while +// instead if b is a RawSymmetricer blas64.Symm is used (general-symmetric +// multiplication), and if b is a *VecDense blas64.Gemv is used. +// +// There are many possible type combinations and special cases. No specific guarantees +// are made about the performance of any method, and in particular, note that an +// abstract matrix type may be copied into a concrete type of the corresponding +// value. If there are specific special cases that are needed, please submit a +// pull-request or file an issue. +// +// Invariants +// +// Matrix input arguments to functions are never directly modified. If an operation +// changes Matrix data, the mutated matrix will be the receiver of a function. +// +// For convenience, a matrix may be used as both a receiver and as an input, e.g. +// a.Pow(a, 6) +// v.SolveVec(a.T(), v) +// though in many cases this will cause an allocation (see Element Aliasing). +// An exception to this rule is Copy, which does not allow a.Copy(a.T()). +// +// Element Aliasing +// +// Most methods in mat modify receiver data. It is forbidden for the modified +// data region of the receiver to overlap the used data area of the input +// arguments. The exception to this rule is when the method receiver is equal to one +// of the input arguments, as in the a.Pow(a, 6) call above, or its implicit transpose. +// +// This prohibition is to help avoid subtle mistakes when the method needs to read +// from and write to the same data region. There are ways to make mistakes using the +// mat API, and mat functions will detect and complain about those. +// There are many ways to make mistakes by excursion from the mat API via +// interaction with raw matrix values. +// +// If you need to read the rest of this section to understand the behavior of +// your program, you are being clever. Don't be clever. If you must be clever, +// blas64 and lapack64 may be used to call the behavior directly. +// +// mat will use the following rules to detect overlap between the receiver and one +// of the inputs: +// - the input implements one of the Raw methods, and +// - the address ranges of the backing data slices overlap, and +// - the strides differ or there is an overlap in the used data elements. +// If such an overlap is detected, the method will panic. +// +// The following cases will not panic: +// - the data slices do not overlap, +// - there is pointer identity between the receiver and input values after +// the value has been untransposed if necessary. +// +// mat will not attempt to detect element overlap if the input does not implement a +// Raw method. Method behavior is undefined if there is undetected overlap. 
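+//
+// As a short example of these rules, the call
+//	a.Sub(a, b)
+// is always safe, because the receiver and the first input are the same
+// value, while calling a method with two distinct matrices that share
+// overlapping backing data will panic when mat can detect the overlap.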
+// +package mat diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/eigen.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/eigen.go new file mode 100644 index 00000000..6204a70f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/eigen.go @@ -0,0 +1,261 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badFact = "mat: use without successful factorization" + badNoVect = "mat: eigenvectors not computed" +) + +// EigenSym is a type for creating and manipulating the Eigen decomposition of +// symmetric matrices. +type EigenSym struct { + vectorsComputed bool + + values []float64 + vectors *Dense +} + +// Factorize computes the eigenvalue decomposition of the symmetric matrix a. +// The Eigen decomposition is defined as +// A = P * D * P^-1 +// where D is a diagonal matrix containing the eigenvalues of the matrix, and +// P is a matrix of the eigenvectors of A. Factorize computes the eigenvalues +// in ascending order. If the vectors input argument is false, the eigenvectors +// are not computed. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, methods that require a successful factorization will panic. +func (e *EigenSym) Factorize(a Symmetric, vectors bool) (ok bool) { + n := a.Symmetric() + sd := NewSymDense(n, nil) + sd.CopySym(a) + + jobz := lapack.EVJob(lapack.None) + if vectors { + jobz = lapack.ComputeEV + } + w := make([]float64, n) + work := []float64{0} + lapack64.Syev(jobz, sd.mat, w, work, -1) + + work = getFloats(int(work[0]), false) + ok = lapack64.Syev(jobz, sd.mat, w, work, len(work)) + putFloats(work) + if !ok { + e.vectorsComputed = false + e.values = nil + e.vectors = nil + return false + } + e.vectorsComputed = vectors + e.values = w + e.vectors = NewDense(n, n, sd.mat.Data) + return true +} + +// succFact returns whether the receiver contains a successful factorization. +func (e *EigenSym) succFact() bool { + return len(e.values) != 0 +} + +// Values extracts the eigenvalues of the factorized matrix. If dst is +// non-nil, the values are stored in-place into dst. In this case +// dst must have length n, otherwise Values will panic. If dst is +// nil, then a new slice will be allocated of the proper length and filled +// with the eigenvalues. +// +// Values panics if the Eigen decomposition was not successful. +func (e *EigenSym) Values(dst []float64) []float64 { + if !e.succFact() { + panic(badFact) + } + if dst == nil { + dst = make([]float64, len(e.values)) + } + if len(dst) != len(e.values) { + panic(ErrSliceLengthMismatch) + } + copy(dst, e.values) + return dst +} + +// EigenvectorsSym extracts the eigenvectors of the factorized matrix and stores +// them in the receiver. Each eigenvector is a column corresponding to the +// respective eigenvalue returned by e.Values. +// +// EigenvectorsSym panics if the factorization was not successful or if the +// decomposition did not compute the eigenvectors. +func (m *Dense) EigenvectorsSym(e *EigenSym) { + if !e.succFact() { + panic(badFact) + } + if !e.vectorsComputed { + panic(badNoVect) + } + m.reuseAs(len(e.values), len(e.values)) + m.Copy(e.vectors) +} + +// Eigen is a type for creating and using the eigenvalue decomposition of a dense matrix. 
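+//
+// A minimal usage sketch, assuming a is a square Matrix:
+//	var eig mat.Eigen
+//	if ok := eig.Factorize(a, false, true); !ok {
+//		// Handle the failed decomposition.
+//	}
+//	values := eig.Values(nil) // the eigenvalues of a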
+type Eigen struct { + n int // The size of the factorized matrix. + + right bool // have the right eigenvectors been computed + left bool // have the left eigenvectors been computed + + values []complex128 + rVectors *Dense + lVectors *Dense +} + +// succFact returns whether the receiver contains a successful factorization. +func (e *Eigen) succFact() bool { + return len(e.values) != 0 +} + +// Factorize computes the eigenvalues of the square matrix a, and optionally +// the eigenvectors. +// +// A right eigenvalue/eigenvector combination is defined by +// A * x_r = λ * x_r +// where x_r is the column vector called an eigenvector, and λ is the corresponding +// eigenvalue. +// +// Similarly, a left eigenvalue/eigenvector combination is defined by +// x_l * A = λ * x_l +// The eigenvalues, but not the eigenvectors, are the same for both decompositions. +// +// Typically eigenvectors refer to right eigenvectors. +// +// In all cases, Eigen computes the eigenvalues of the matrix. If right and left +// are true, then the right and left eigenvectors will be computed, respectively. +// Eigen panics if the input matrix is not square. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, methods that require a successful factorization will panic. +func (e *Eigen) Factorize(a Matrix, left, right bool) (ok bool) { + // TODO(btracey): Change implementation to store VecDenses as a *CMat when + // #308 is resolved. + + // Copy a because it is modified during the Lapack call. + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + var sd Dense + sd.Clone(a) + + var vl, vr Dense + var jobvl lapack.LeftEVJob = lapack.None + var jobvr lapack.RightEVJob = lapack.None + if left { + vl = *NewDense(r, r, nil) + jobvl = lapack.ComputeLeftEV + } + if right { + vr = *NewDense(c, c, nil) + jobvr = lapack.ComputeRightEV + } + + wr := getFloats(c, false) + defer putFloats(wr) + wi := getFloats(c, false) + defer putFloats(wi) + + work := []float64{0} + lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, -1) + work = getFloats(int(work[0]), false) + first := lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, len(work)) + putFloats(work) + + if first != 0 { + e.values = nil + return false + } + e.n = r + e.right = right + e.left = left + e.lVectors = &vl + e.rVectors = &vr + values := make([]complex128, r) + for i, v := range wr { + values[i] = complex(v, wi[i]) + } + e.values = values + return true +} + +// Values extracts the eigenvalues of the factorized matrix. If dst is +// non-nil, the values are stored in-place into dst. In this case +// dst must have length n, otherwise Values will panic. If dst is +// nil, then a new slice will be allocated of the proper length and +// filled with the eigenvalues. +// +// Values panics if the Eigen decomposition was not successful. +func (e *Eigen) Values(dst []complex128) []complex128 { + if !e.succFact() { + panic(badFact) + } + if dst == nil { + dst = make([]complex128, e.n) + } + if len(dst) != e.n { + panic(ErrSliceLengthMismatch) + } + copy(dst, e.values) + return dst +} + +// Vectors returns the right eigenvectors of the decomposition. Vectors +// will panic if the right eigenvectors were not computed during the factorization, +// or if the factorization was not successful. +// +// The returned matrix will contain the right eigenvectors of the decomposition +// in the columns of the n×n matrix in the same order as their eigenvalues.
+// If the j-th eigenvalue is real, then +// u_j = VL[:,j], +// v_j = VR[:,j], +// and if it is not real, then j and j+1 form a complex conjugate pair and the +// eigenvectors can be recovered as +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// where i is the imaginary unit. The computed eigenvectors are normalized to +// have Euclidean norm equal to 1 and largest component real. +// +// BUG: This signature and behavior will change when issue #308 is resolved. +func (e *Eigen) Vectors() *Dense { + if !e.succFact() { + panic(badFact) + } + if !e.right { + panic(badNoVect) + } + return DenseCopyOf(e.rVectors) +} + +// LeftVectors returns the left eigenvectors of the decomposition. LeftVectors +// will panic if the left eigenvectors were not computed during the factorization, +// or if the factorization was not successful. +// +// See the documentation in lapack64.Geev for the format of the vectors. +// +// BUG: This signature and behavior will change when issue #308 is resolved. +func (e *Eigen) LeftVectors() *Dense { + if !e.succFact() { + panic(badFact) + } + if !e.left { + panic(badNoVect) + } + return DenseCopyOf(e.lVectors) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/errors.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/errors.go new file mode 100644 index 00000000..e47ea71b --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/errors.go @@ -0,0 +1,148 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "fmt" + "runtime" + + "gonum.org/v1/gonum/lapack" +) + +// Condition is the condition number of a matrix. The condition +// number is defined as |A| * |A^-1|. +// +// One important use of Condition is during linear solve routines (finding x such +// that A * x = b). The condition number of A indicates the accuracy of +// the computed solution. A Condition error will be returned if the condition +// number of A is sufficiently large. If A is exactly singular to working precision, +// Condition == ∞, and the solve algorithm may have completed early. If Condition +// is large and finite the solve algorithm will be performed, but the computed +// solution may be inaccurate. Due to the nature of finite precision arithmetic, +// the value of Condition is only an approximate test of singularity. +type Condition float64 + +func (c Condition) Error() string { + return fmt.Sprintf("matrix singular or near-singular with condition number %.4e", c) +} + +// ConditionTolerance is the tolerance limit of the condition number. If the +// condition number is above this value, the matrix is considered singular. +const ConditionTolerance = 1e16 + +const ( + // CondNorm is the matrix norm used for computing the condition number by routines + // in the matrix packages. + CondNorm = lapack.MaxRowSum + + // CondNormTrans is the norm used to compute on A^T to get the same result as + // computing CondNorm on A. + CondNormTrans = lapack.MaxColumnSum +) + +const stackTraceBufferSize = 1 << 20 + +// Maybe will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked.
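+//
+// For example, assuming a and b are Matrix values:
+//	err := mat.Maybe(func() {
+//		var c mat.Dense
+//		c.Mul(a, b) // Panics with a mat.Error if the shapes mismatch.
+//	})
+//	if err != nil {
+//		// Handle the recovered error.
+//	}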
+func Maybe(fn func()) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + fn() + return +} + +// MaybeFloat will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. +func MaybeFloat(fn func() float64) (f float64, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + return fn(), nil +} + +// MaybeComplex will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. +func MaybeComplex(fn func() complex128) (f complex128, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + return fn(), nil +} + +// Error represents matrix handling errors. These errors can be recovered by Maybe wrappers. +type Error struct{ string } + +func (err Error) Error() string { return err.string } + +var ( + ErrIndexOutOfRange = Error{"matrix: index out of range"} + ErrRowAccess = Error{"matrix: row index out of range"} + ErrColAccess = Error{"matrix: column index out of range"} + ErrVectorAccess = Error{"matrix: vector index out of range"} + ErrZeroLength = Error{"matrix: zero length in matrix dimension"} + ErrRowLength = Error{"matrix: row length mismatch"} + ErrColLength = Error{"matrix: col length mismatch"} + ErrSquare = Error{"matrix: expect square matrix"} + ErrNormOrder = Error{"matrix: invalid norm order for matrix"} + ErrSingular = Error{"matrix: matrix is singular"} + ErrShape = Error{"matrix: dimension mismatch"} + ErrIllegalStride = Error{"matrix: illegal stride"} + ErrPivot = Error{"matrix: malformed pivot list"} + ErrTriangle = Error{"matrix: triangular storage mismatch"} + ErrTriangleSet = Error{"matrix: triangular set out of bounds"} + ErrBandSet = Error{"matrix: band set out of bounds"} + ErrSliceLengthMismatch = Error{"matrix: input slice length mismatch"} + ErrNotPSD = Error{"matrix: input not positive symmetric definite"} + ErrFailedEigen = Error{"matrix: eigendecomposition not successful"} +) + +// ErrorStack represents matrix handling errors that have been recovered by Maybe wrappers. +type ErrorStack struct { + Err error + + // StackTrace is the stack trace + // recovered by Maybe, MaybeFloat + // or MaybeComplex. 
+ StackTrace string +} + +func (err ErrorStack) Error() string { return err.Err.Error() } diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/format.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/format.go new file mode 100644 index 00000000..ce72eb19 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/format.go @@ -0,0 +1,238 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "fmt" + "strconv" +) + +// Formatted returns a fmt.Formatter for the matrix m using the given options. +func Formatted(m Matrix, options ...FormatOption) fmt.Formatter { + f := formatter{ + matrix: m, + dot: '.', + } + for _, o := range options { + o(&f) + } + return f +} + +type formatter struct { + matrix Matrix + prefix string + margin int + dot byte + squeeze bool +} + +// FormatOption is a functional option for matrix formatting. +type FormatOption func(*formatter) + +// Prefix sets the formatted prefix to the string p. Prefix is a string that is prepended to +// each line of output. +func Prefix(p string) FormatOption { + return func(f *formatter) { f.prefix = p } +} + +// Excerpt sets the maximum number of rows and columns to print at the margins of the matrix +// to m. If m is zero or less all elements are printed. +func Excerpt(m int) FormatOption { + return func(f *formatter) { f.margin = m } +} + +// DotByte sets the dot character to b. The dot character is used to replace zero elements +// if the result is printed with the fmt ' ' verb flag. Without a DotByte option, the default +// dot character is '.'. +func DotByte(b byte) FormatOption { + return func(f *formatter) { f.dot = b } +} + +// Squeeze sets the printing behaviour to minimise column width for each individual column. +func Squeeze() FormatOption { + return func(f *formatter) { f.squeeze = true } +} + +// Format satisfies the fmt.Formatter interface. +func (f formatter) Format(fs fmt.State, c rune) { + if c == 'v' && fs.Flag('#') { + fmt.Fprintf(fs, "%#v", f.matrix) + return + } + format(f.matrix, f.prefix, f.margin, f.dot, f.squeeze, fs, c) +} + +// format prints a pretty representation of m to the fs io.Writer. The format character c +// specifies the numerical representation of elements; valid values are those for float64 +// specified in the fmt package, with their associated flags. In addition to this, a space +// preceding a verb indicates that zero values should be represented by the dot character. +// The printed range of the matrix can be limited by specifying a positive value for margin. +// If margin is greater than zero, only the first and last margin rows/columns of the matrix +// are output. If squeeze is true, column widths are determined on a per-column basis. +// +// format will not provide Go syntax output.
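+//
+// The exported entry point is Formatted; for example:
+//	fmt.Printf("%v\n", mat.Formatted(m, mat.Prefix("    "), mat.Squeeze()))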
+func format(m Matrix, prefix string, margin int, dot byte, squeeze bool, fs fmt.State, c rune) { + rows, cols := m.Dims() + + var printed int + if margin <= 0 { + printed = rows + if cols > printed { + printed = cols + } + } else { + printed = margin + } + + prec, pOk := fs.Precision() + if !pOk { + prec = -1 + } + + var ( + maxWidth int + widths widther + buf, pad []byte + ) + if squeeze { + widths = make(columnWidth, cols) + } else { + widths = new(uniformWidth) + } + switch c { + case 'v', 'e', 'E', 'f', 'F', 'g', 'G': + if c == 'v' { + buf, maxWidth = maxCellWidth(m, 'g', printed, prec, widths) + } else { + buf, maxWidth = maxCellWidth(m, c, printed, prec, widths) + } + default: + fmt.Fprintf(fs, "%%!%c(%T=Dims(%d, %d))", c, m, rows, cols) + return + } + width, _ := fs.Width() + width = max(width, maxWidth) + pad = make([]byte, max(width, 2)) + for i := range pad { + pad[i] = ' ' + } + + first := true + if rows > 2*printed || cols > 2*printed { + first = false + fmt.Fprintf(fs, "Dims(%d, %d)\n", rows, cols) + } + + skipZero := fs.Flag(' ') + for i := 0; i < rows; i++ { + if !first { + fmt.Fprint(fs, prefix) + } + first = false + var el string + switch { + case rows == 1: + fmt.Fprint(fs, "[") + el = "]" + case i == 0: + fmt.Fprint(fs, "⎡") + el = "⎤\n" + case i < rows-1: + fmt.Fprint(fs, "⎢") + el = "⎥\n" + default: + fmt.Fprint(fs, "⎣") + el = "⎦" + } + + for j := 0; j < cols; j++ { + if j >= printed && j < cols-printed { + j = cols - printed - 1 + if i == 0 || i == rows-1 { + fmt.Fprint(fs, "... ... ") + } else { + fmt.Fprint(fs, " ") + } + continue + } + + v := m.At(i, j) + if v == 0 && skipZero { + buf = buf[:1] + buf[0] = dot + } else { + if c == 'v' { + buf = strconv.AppendFloat(buf[:0], v, 'g', prec, 64) + } else { + buf = strconv.AppendFloat(buf[:0], v, byte(c), prec, 64) + } + } + if fs.Flag('-') { + fs.Write(buf) + fs.Write(pad[:widths.width(j)-len(buf)]) + } else { + fs.Write(pad[:widths.width(j)-len(buf)]) + fs.Write(buf) + } + + if j < cols-1 { + fs.Write(pad[:2]) + } + } + + fmt.Fprint(fs, el) + + if i >= printed-1 && i < rows-printed && 2*printed < rows { + i = rows - printed - 1 + fmt.Fprintf(fs, "%s .\n%[1]s .\n%[1]s .\n", prefix) + continue + } + } +} + +func maxCellWidth(m Matrix, c rune, printed, prec int, w widther) ([]byte, int) { + var ( + buf = make([]byte, 0, 64) + rows, cols = m.Dims() + max int + ) + for i := 0; i < rows; i++ { + if i >= printed-1 && i < rows-printed && 2*printed < rows { + i = rows - printed - 1 + continue + } + for j := 0; j < cols; j++ { + if j >= printed && j < cols-printed { + continue + } + + buf = strconv.AppendFloat(buf, m.At(i, j), byte(c), prec, 64) + if len(buf) > max { + max = len(buf) + } + if len(buf) > w.width(j) { + w.setWidth(j, len(buf)) + } + buf = buf[:0] + } + } + return buf, max +} + +type widther interface { + width(i int) int + setWidth(i, w int) +} + +type uniformWidth int + +func (u *uniformWidth) width(_ int) int { return int(*u) } +func (u *uniformWidth) setWidth(_, w int) { *u = uniformWidth(w) } + +type columnWidth []int + +func (c columnWidth) width(i int) int { return c[i] } +func (c columnWidth) setWidth(i, w int) { c[i] = w } diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/gsvd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/gsvd.go new file mode 100644 index 00000000..f2b82cb5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/gsvd.go @@ -0,0 +1,371 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// GSVD is a type for creating and using the Generalized Singular Value Decomposition +// (GSVD) of a matrix. +// +// The factorization is a linear transformation of the data sets from the given +// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample" +// spaces. +type GSVD struct { + kind GSVDKind + + r, p, c, k, l int + s1, s2 []float64 + a, b, u, v, q blas64.General + + work []float64 + iwork []int +} + +// Factorize computes the generalized singular value decomposition (GSVD) of the input +// r×c matrix A and the p×c matrix B. The singular values of A and B are computed +// in all cases, while the singular vectors are optionally computed depending on the +// input kind. +// +// The full singular value decomposition (kind == GSVDU|GSVDV|GSVDQ) deconstructs A and B as +// A = U * Σ₁ * [ 0 R ] * Q^T +// +// B = V * Σ₂ * [ 0 R ] * Q^T +// where Σ₁ and Σ₂ are r×(k+l) and p×(k+l) diagonal matrices of singular values, and +// U, V and Q are r×r, p×p and c×c orthogonal matrices of singular vectors. k+l is the +// effective numerical rank of the matrix [ A^T B^T ]^T. +// +// It is frequently not necessary to compute the full GSVD. Computation time and +// storage costs can be reduced using the appropriate kind. Either only the singular +// values can be computed (kind == GSVDNone), or in conjunction with specific singular +// vectors (kind bit set according to matrix.GSVDU, matrix.GSVDV and matrix.GSVDQ). +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. +func (gsvd *GSVD) Factorize(a, b Matrix, kind GSVDKind) (ok bool) { + r, c := a.Dims() + gsvd.r, gsvd.c = r, c + p, c := b.Dims() + gsvd.p = p + if gsvd.c != c { + panic(ErrShape) + } + var jobU, jobV, jobQ lapack.GSVDJob + switch { + default: + panic("gsvd: bad input kind") + case kind == GSVDNone: + jobU = lapack.GSVDNone + jobV = lapack.GSVDNone + jobQ = lapack.GSVDNone + case (GSVDU|GSVDV|GSVDQ)&kind != 0: + if GSVDU&kind != 0 { + jobU = lapack.GSVDU + gsvd.u = blas64.General{ + Rows: r, + Cols: r, + Stride: r, + Data: use(gsvd.u.Data, r*r), + } + } + if GSVDV&kind != 0 { + jobV = lapack.GSVDV + gsvd.v = blas64.General{ + Rows: p, + Cols: p, + Stride: p, + Data: use(gsvd.v.Data, p*p), + } + } + if GSVDQ&kind != 0 { + jobQ = lapack.GSVDQ + gsvd.q = blas64.General{ + Rows: c, + Cols: c, + Stride: c, + Data: use(gsvd.q.Data, c*c), + } + } + } + + // A and B are destroyed on call, so copy the matrices. + aCopy := DenseCopyOf(a) + bCopy := DenseCopyOf(b) + + gsvd.s1 = use(gsvd.s1, c) + gsvd.s2 = use(gsvd.s2, c) + + gsvd.iwork = useInt(gsvd.iwork, c) + + gsvd.work = use(gsvd.work, 1) + lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, -1, gsvd.iwork) + gsvd.work = use(gsvd.work, int(gsvd.work[0])) + gsvd.k, gsvd.l, ok = lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, len(gsvd.work), gsvd.iwork) + if ok { + gsvd.a = aCopy.mat + gsvd.b = bCopy.mat + gsvd.kind = kind + } + return ok +} + +// Kind returns the matrix.GSVDKind of the decomposition. If no decomposition has been +// computed, Kind returns 0.
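+//
+// For example, callers can check the kind before requesting vectors:
+//	if gsvd.Kind()&GSVDU != 0 {
+//		u := gsvd.UTo(nil) // Safe: U was computed during factorization.
+//	}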
+func (gsvd *GSVD) Kind() GSVDKind { + return gsvd.kind +} + +// Rank returns the k and l terms of the rank of [ A^T B^T ]^T. +func (gsvd *GSVD) Rank() (k, l int) { + return gsvd.k, gsvd.l +} + +// GeneralizedValues returns the generalized singular values of the factorized matrices. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and GeneralizedValues will +// panic with matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// GeneralizedValues will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) GeneralizedValues(v []float64) []float64 { + if gsvd.kind == 0 { + panic("gsvd: no decomposition computed") + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if v == nil { + v = make([]float64, d-k) + } + if len(v) != d-k { + panic(ErrSliceLengthMismatch) + } + floats.DivTo(v, gsvd.s1[k:d], gsvd.s2[k:d]) + return v +} + +// ValuesA returns the singular values of the factorized A matrix. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and ValuesA will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// ValuesA will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) ValuesA(s []float64) []float64 { + if gsvd.kind == 0 { + panic("gsvd: no decomposition computed") + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if s == nil { + s = make([]float64, d-k) + } + if len(s) != d-k { + panic(ErrSliceLengthMismatch) + } + copy(s, gsvd.s1[k:min(r, c)]) + return s +} + +// ValuesB returns the singular values of the factorized B matrix. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and ValuesB will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// ValuesB will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) ValuesB(s []float64) []float64 { + if gsvd.kind == 0 { + panic("gsvd: no decomposition computed") + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if s == nil { + s = make([]float64, d-k) + } + if len(s) != d-k { + panic(ErrSliceLengthMismatch) + } + copy(s, gsvd.s2[k:d]) + return s +} + +// ZeroRTo extracts the matrix [ 0 R ] from the singular value decomposition, storing +// the result in-place into dst. [ 0 R ] is size (k+l)×c. +// If dst is nil, a new matrix is allocated. The resulting ZeroR matrix is returned. +// +// ZeroRTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) ZeroRTo(dst *Dense) *Dense { + if gsvd.kind == 0 { + panic("gsvd: no decomposition computed") + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + l := gsvd.l + h := min(k+l, r) + if dst == nil { + dst = NewDense(k+l, c, nil) + } else { + dst.reuseAsZeroed(k+l, c) + } + a := Dense{ + mat: gsvd.a, + capRows: r, + capCols: c, + } + dst.Slice(0, h, c-k-l, c).(*Dense). + Copy(a.Slice(0, h, c-k-l, c)) + if r < k+l { + b := Dense{ + mat: gsvd.b, + capRows: gsvd.p, + capCols: c, + } + dst.Slice(r, k+l, c+r-k-l, c).(*Dense). 
+ Copy(b.Slice(r-k, l, c+r-k-l, c)) + } + return dst +} + +// SigmaATo extracts the matrix Σ₁ from the singular value decomposition, storing +// the result in-place into dst. Σ₁ is size r×(k+l). +// If dst is nil, a new matrix is allocated. The resulting SigmaA matrix is returned. +// +// SigmaATo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) SigmaATo(dst *Dense) *Dense { + if gsvd.kind == 0 { + panic("gsvd: no decomposition computed") + } + r := gsvd.r + k := gsvd.k + l := gsvd.l + if dst == nil { + dst = NewDense(r, k+l, nil) + } else { + dst.reuseAsZeroed(r, k+l) + } + for i := 0; i < k; i++ { + dst.set(i, i, 1) + } + for i := k; i < min(r, k+l); i++ { + dst.set(i, i, gsvd.s1[i]) + } + return dst +} + +// SigmaBTo extracts the matrix Σ₂ from the singular value decomposition, storing +// the result in-place into dst. Σ₂ is size p×(k+l). +// If dst is nil, a new matrix is allocated. The resulting SigmaB matrix is returned. +// +// SigmaBTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) SigmaBTo(dst *Dense) *Dense { + if gsvd.kind == 0 { + panic("gsvd: no decomposition computed") + } + r := gsvd.r + p := gsvd.p + k := gsvd.k + l := gsvd.l + if dst == nil { + dst = NewDense(p, k+l, nil) + } else { + dst.reuseAsZeroed(p, k+l) + } + for i := 0; i < min(l, r-k); i++ { + dst.set(i, i+k, gsvd.s2[k+i]) + } + for i := r - k; i < l; i++ { + dst.set(i, i+k, 1) + } + return dst +} + +// UTo extracts the matrix U from the singular value decomposition, storing +// the result in-place into dst. U is size r×r. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +// +// UTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) UTo(dst *Dense) *Dense { + if gsvd.kind&GSVDU == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.u.Rows + c := gsvd.u.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.u, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} + +// VTo extracts the matrix V from the singular value decomposition, storing +// the result in-place into dst. V is size p×p. +// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. +// +// VTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) VTo(dst *Dense) *Dense { + if gsvd.kind&GSVDV == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.v.Rows + c := gsvd.v.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.v, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} + +// QTo extracts the matrix Q from the singular value decomposition, storing +// the result in-place into dst. Q is size c×c. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +// +// QTo will panic if the receiver does not contain a successful factorization. 
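Taken together, the extraction methods above let a caller rebuild A from its factors. A sketch continuing the earlier example, assuming the package's usual Dense.Mul and Dense.RawMatrix methods, which are defined elsewhere in this vendored tree:

```go
// Reconstruct A ≈ U * Σ₁ * [ 0 R ] * Qᵀ. Passing nil asks each
// method to allocate its result.
u := gsvd.UTo(nil)       // r×r
s1 := gsvd.SigmaATo(nil) // r×(k+l)
zr := gsvd.ZeroRTo(nil)  // (k+l)×c
q := gsvd.QTo(nil)       // c×c

var us, uszr, aRecon mat.Dense
us.Mul(u, s1)
uszr.Mul(&us, zr)
aRecon.Mul(&uszr, q.T())
fmt.Println(aRecon.RawMatrix().Data) // ≈ the original A, row-major
```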
+func (gsvd *GSVD) QTo(dst *Dense) *Dense { + if gsvd.kind&GSVDQ == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.q.Rows + c := gsvd.q.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.q, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/hogsvd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/hogsvd.go new file mode 100644 index 00000000..4b0a8ba6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/hogsvd.go @@ -0,0 +1,217 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "errors" + + "gonum.org/v1/gonum/blas/blas64" +) + +// HOGSVD is a type for creating and using the Higher Order Generalized Singular Value +// Decomposition (HOGSVD) of a set of matrices. +// +// The factorization is a linear transformation of the data sets from the given +// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample" +// spaces. +type HOGSVD struct { + n int + v *Dense + b []Dense + + err error +} + +// Factorize computes the higher order generalized singular value decomposition (HOGSVD) +// of the n input r_i×c column tall matrices in m. HOGSV extends the GSVD case from 2 to n +// input matrices. +// +// M_0 = U_0 * Σ_0 * V^T +// M_1 = U_1 * Σ_1 * V^T +// . +// . +// . +// M_{n-1} = U_{n-1} * Σ_{n-1} * V^T +// +// where U_i are r_i×c matrices of singular vectors, Σ are c×c matrices singular values, and V +// is a c×c matrix of singular vectors. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. +func (gsvd *HOGSVD) Factorize(m ...Matrix) (ok bool) { + // Factorize performs the HOGSVD factorisation + // essentially as described by Ponnapalli et al. + // https://doi.org/10.1371/journal.pone.0028072 + + if len(m) < 2 { + panic("hogsvd: too few matrices") + } + gsvd.n = 0 + + r, c := m[0].Dims() + a := make([]Cholesky, len(m)) + var ts SymDense + for i, d := range m { + rd, cd := d.Dims() + if rd < cd { + gsvd.err = ErrShape + return false + } + if rd > r { + r = rd + } + if cd != c { + panic(ErrShape) + } + ts.Reset() + ts.SymOuterK(1, d.T()) + ok = a[i].Factorize(&ts) + if !ok { + gsvd.err = errors.New("hogsvd: cholesky decomposition failed") + return false + } + } + + s := getWorkspace(c, c, true) + defer putWorkspace(s) + sij := getWorkspace(c, c, false) + defer putWorkspace(sij) + for i, ai := range a { + for _, aj := range a[i+1:] { + gsvd.err = ai.SolveChol(sij, &aj) + if gsvd.err != nil { + return false + } + s.Add(s, sij) + + gsvd.err = aj.SolveChol(sij, &ai) + if gsvd.err != nil { + return false + } + s.Add(s, sij) + } + } + s.Scale(1/float64(len(m)*(len(m)-1)), s) + + var eig Eigen + ok = eig.Factorize(s.T(), false, true) + if !ok { + gsvd.err = errors.New("hogsvd: eigen decomposition failed") + return false + } + v := eig.Vectors() + var cv VecDense + for j := 0; j < c; j++ { + cv.ColViewOf(v, j) + cv.ScaleVec(1/blas64.Nrm2(c, cv.mat), &cv) + } + + b := make([]Dense, len(m)) + biT := getWorkspace(c, r, false) + defer putWorkspace(biT) + for i, d := range m { + // All calls to reset will leave a zeroed + // matrix with capacity to store the result + // without additional allocation. 
+ biT.Reset() + gsvd.err = biT.Solve(v, d.T()) + if gsvd.err != nil { + return false + } + b[i].Clone(biT.T()) + } + + gsvd.n = len(m) + gsvd.v = v + gsvd.b = b + return true +} + +// Err returns the reason for a factorization failure. +func (gsvd *HOGSVD) Err() error { + return gsvd.err +} + +// Len returns the number of matrices that have been factorized. If Len returns +// zero, the factorization was not successful. +func (gsvd *HOGSVD) Len() int { + return gsvd.n +} + +// UTo extracts the matrix U_n from the singular value decomposition, storing +// the result in-place into dst. U_n is size r×c. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +// +// UTo will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) UTo(dst *Dense, n int) *Dense { + if gsvd.n == 0 { + panic("hogsvd: unsuccessful factorization") + } + if n < 0 || gsvd.n <= n { + panic("hogsvd: invalid index") + } + + if dst == nil { + r, c := gsvd.b[n].Dims() + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(gsvd.b[n].Dims()) + } + dst.Copy(&gsvd.b[n]) + var v VecDense + for j, f := range gsvd.Values(nil, n) { + v.ColViewOf(dst, j) + v.ScaleVec(1/f, &v) + } + return dst +} + +// Values returns the nth set of singular values of the factorized system. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length c, and Values will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// Values will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) Values(s []float64, n int) []float64 { + if gsvd.n == 0 { + panic("hogsvd: unsuccessful factorization") + } + if n < 0 || gsvd.n <= n { + panic("hogsvd: invalid index") + } + + r, c := gsvd.b[n].Dims() + if s == nil { + s = make([]float64, c) + } else if len(s) != c { + panic(ErrSliceLengthMismatch) + } + var v VecDense + for j := 0; j < c; j++ { + v.ColViewOf(&gsvd.b[n], j) + s[j] = blas64.Nrm2(r, v.mat) + } + return s +} + +// VTo extracts the matrix V from the singular value decomposition, storing +// the result in-place into dst. V is size c×c. +// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. +// +// VTo will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) VTo(dst *Dense) *Dense { + if gsvd.n == 0 { + panic("hogsvd: unsuccessful factorization") + } + if dst == nil { + r, c := gsvd.v.Dims() + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(gsvd.v.Dims()) + } + dst.Copy(gsvd.v) + return dst +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go new file mode 100644 index 00000000..3dd5cfc3 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go @@ -0,0 +1,233 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file must be kept in sync with index_no_bound_checks.go. + +// +build bounds + +package mat + +// At returns the element at row i, column j. 
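A usage sketch for the HOGSVD API above, on made-up data: every input must be column-tall, share the same column count, and have full column rank so the internal Cholesky step succeeds.

```go
package main

import (
	"fmt"
	"log"

	"gonum.org/v1/gonum/mat"
)

func main() {
	m0 := mat.NewDense(4, 2, []float64{1, 2, 3, 4, 5, 6, 7, 8})
	m1 := mat.NewDense(3, 2, []float64{1, 1, 2, 3, 5, 8})
	m2 := mat.NewDense(5, 2, []float64{2, 0, 0, 3, 1, 1, 4, 2, 2, 4})

	var hog mat.HOGSVD
	if ok := hog.Factorize(m0, m1, m2); !ok {
		log.Fatalf("hogsvd: %v", hog.Err())
	}
	for n := 0; n < hog.Len(); n++ {
		fmt.Println(hog.Values(nil, n)) // singular values of the nth input
	}
	v := hog.VTo(nil) // shared right singular vectors, c×c
	fmt.Println(v.Dims())
}
```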
+func (m *Dense) At(i, j int) float64 { + return m.at(i, j) +} + +func (m *Dense) at(i, j int) float64 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *Dense) Set(i, j int, v float64) { + m.set(i, j, v) +} + +func (m *Dense) set(i, j int, v float64) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i. +// It panics if i is out of bounds or if j is not zero. +func (v *VecDense) At(i, j int) float64 { + if j != 0 { + panic(ErrColAccess) + } + return v.at(i) +} + +// AtVec returns the element at row i. +// It panics if i is out of bounds. +func (v *VecDense) AtVec(i int) float64 { + return v.at(i) +} + +func (v *VecDense) at(i int) float64 { + if uint(i) >= uint(v.n) { + panic(ErrRowAccess) + } + return v.mat.Data[i*v.mat.Inc] +} + +// SetVec sets the element at row i to the value val. +// It panics if i is out of bounds. +func (v *VecDense) SetVec(i int, val float64) { + v.setVec(i, val) +} + +func (v *VecDense) setVec(i int, val float64) { + if uint(i) >= uint(v.n) { + panic(ErrVectorAccess) + } + v.mat.Data[i*v.mat.Inc] = val +} + +// At returns the element at row i and column j. +func (t *SymDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *SymDense) at(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetSym sets the elements at (i,j) and (j,i) to the value v. +func (t *SymDense) SetSym(i, j int, v float64) { + t.set(i, j, v) +} + +func (t *SymDense) set(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (t *TriDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *TriDense) at(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetTri sets the element of the triangular matrix at row i, column j to the value v. +// It panics if the location is outside the appropriate half of the matrix. +func (t *TriDense) SetTri(i, j int, v float64) { + t.set(i, j, v) +} + +func (t *TriDense) set(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (b *BandDense) At(i, j int) float64 { + return b.at(i, j) +} + +func (b *BandDense) at(i, j int) float64 { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + return 0 + } + return b.mat.Data[i*b.mat.Stride+pj] +} + +// SetBand sets the element at row i, column j to the value v. 
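One detail of these checks is worth spelling out: uint(i) >= uint(n) rejects a negative i and an overlarge i with a single comparison, because converting a negative int to uint wraps to a huge value. The same idiom in isolation (inBounds is a hypothetical name, not part of this package):

```go
// inBounds reports whether 0 <= i < n. A negative i converts to a
// very large uint, so one unsigned comparison covers both bounds.
func inBounds(i, n int) bool {
	return uint(i) < uint(n)
}
```

This file is only compiled under the bounds build tag (go build -tags bounds), which moves the checks into the unexported fast paths used internally; in the default build, the sibling file that follows keeps the checks in the exported methods only.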
+// It panics if the location is outside the appropriate region of the matrix. +func (b *BandDense) SetBand(i, j int, v float64) { + b.set(i, j, v) +} + +func (b *BandDense) set(i, j int, v float64) { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + panic(ErrBandSet) + } + b.mat.Data[i*b.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (s *SymBandDense) At(i, j int) float64 { + return s.at(i, j) +} + +func (s *SymBandDense) at(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + return 0 + } + return s.mat.Data[i*s.mat.Stride+pj] +} + +// SetSymBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (s *SymBandDense) SetSymBand(i, j int, v float64) { + s.set(i, j, v) +} + +func (s *SymBandDense) set(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + panic(ErrBandSet) + } + s.mat.Data[i*s.mat.Stride+pj] = v +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go new file mode 100644 index 00000000..9a49a7f5 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go @@ -0,0 +1,237 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file must be kept in sync with index_bound_checks.go. + +// +build !bounds + +package mat + +// At returns the element at row i, column j. +func (m *Dense) At(i, j int) float64 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.at(i, j) +} + +func (m *Dense) at(i, j int) float64 { + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *Dense) Set(i, j int, v float64) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.set(i, j, v) +} + +func (m *Dense) set(i, j int, v float64) { + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i. +// It panics if i is out of bounds or if j is not zero. +func (v *VecDense) At(i, j int) float64 { + if uint(i) >= uint(v.n) { + panic(ErrRowAccess) + } + if j != 0 { + panic(ErrColAccess) + } + return v.at(i) +} + +// AtVec returns the element at row i. +// It panics if i is out of bounds. +func (v *VecDense) AtVec(i int) float64 { + if uint(i) >= uint(v.n) { + panic(ErrRowAccess) + } + return v.at(i) +} + +func (v *VecDense) at(i int) float64 { + return v.mat.Data[i*v.mat.Inc] +} + +// SetVec sets the element at row i to the value val. +// It panics if i is out of bounds. +func (v *VecDense) SetVec(i int, val float64) { + if uint(i) >= uint(v.n) { + panic(ErrVectorAccess) + } + v.setVec(i, val) +} + +func (v *VecDense) setVec(i int, val float64) { + v.mat.Data[i*v.mat.Inc] = val +} + +// At returns the element at row i and column j. 
+func (s *SymDense) At(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + return s.at(i, j) +} + +func (s *SymDense) at(i, j int) float64 { + if i > j { + i, j = j, i + } + return s.mat.Data[i*s.mat.Stride+j] +} + +// SetSym sets the elements at (i,j) and (j,i) to the value v. +func (s *SymDense) SetSym(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + s.set(i, j, v) +} + +func (s *SymDense) set(i, j int, v float64) { + if i > j { + i, j = j, i + } + s.mat.Data[i*s.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (t *TriDense) At(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + return t.at(i, j) +} + +func (t *TriDense) at(i, j int) float64 { + isUpper := t.triKind() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetTri sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate half of the matrix. +func (t *TriDense) SetTri(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + t.set(i, j, v) +} + +func (t *TriDense) set(i, j int, v float64) { + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (b *BandDense) At(i, j int) float64 { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + return b.at(i, j) +} + +func (b *BandDense) at(i, j int) float64 { + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + return 0 + } + return b.mat.Data[i*b.mat.Stride+pj] +} + +// SetBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (b *BandDense) SetBand(i, j int, v float64) { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + panic(ErrBandSet) + } + b.set(i, j, v) +} + +func (b *BandDense) set(i, j int, v float64) { + pj := j + b.mat.KL - i + b.mat.Data[i*b.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (s *SymBandDense) At(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + return s.at(i, j) +} + +func (s *SymBandDense) at(i, j int) float64 { + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + return 0 + } + return s.mat.Data[i*s.mat.Stride+pj] +} + +// SetSymBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. 
+func (s *SymBandDense) SetSymBand(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + s.set(i, j, v) +} + +func (s *SymBandDense) set(i, j int, v float64) { + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + panic(ErrBandSet) + } + s.mat.Data[i*s.mat.Stride+pj] = v +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/inner.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/inner.go new file mode 100644 index 00000000..fba3e0b0 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/inner.go @@ -0,0 +1,121 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Inner computes the generalized inner product +// x^T A y +// between column vectors x and y with matrix A. This is only a true inner product if +// A is symmetric positive definite, though the operation works for any matrix A. +// +// Inner panics if x.Len != m or y.Len != n when A is an m x n matrix. +func Inner(x Vector, a Matrix, y Vector) float64 { + m, n := a.Dims() + if x.Len() != m { + panic(ErrShape) + } + if y.Len() != n { + panic(ErrShape) + } + if m == 0 || n == 0 { + return 0 + } + + var sum float64 + + switch a := a.(type) { + case RawSymmetricer: + amat := a.RawSymmetric() + if amat.Uplo != blas.Upper { + // Panic as a string not a mat.Error. + panic(badSymTriangle) + } + var xmat, ymat blas64.Vector + if xrv, ok := x.(RawVectorer); ok { + xmat = xrv.RawVector() + } else { + break + } + if yrv, ok := y.(RawVectorer); ok { + ymat = yrv.RawVector() + } else { + break + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + if xi != 0 { + if ymat.Inc == 1 { + sum += xi * f64.DotUnitary( + amat.Data[i*amat.Stride+i:i*amat.Stride+n], + ymat.Data[i:], + ) + } else { + sum += xi * f64.DotInc( + amat.Data[i*amat.Stride+i:i*amat.Stride+n], + ymat.Data[i*ymat.Inc:], uintptr(n-i), + 1, uintptr(ymat.Inc), + 0, 0, + ) + } + } + yi := y.AtVec(i) + if i != n-1 && yi != 0 { + if xmat.Inc == 1 { + sum += yi * f64.DotUnitary( + amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], + xmat.Data[i+1:], + ) + } else { + sum += yi * f64.DotInc( + amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], + xmat.Data[(i+1)*xmat.Inc:], uintptr(n-i-1), + 1, uintptr(xmat.Inc), + 0, 0, + ) + } + } + } + return sum + case RawMatrixer: + amat := a.RawMatrix() + var ymat blas64.Vector + if yrv, ok := y.(RawVectorer); ok { + ymat = yrv.RawVector() + } else { + break + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + if xi != 0 { + if ymat.Inc == 1 { + sum += xi * f64.DotUnitary( + amat.Data[i*amat.Stride:i*amat.Stride+n], + ymat.Data, + ) + } else { + sum += xi * f64.DotInc( + amat.Data[i*amat.Stride:i*amat.Stride+n], + ymat.Data, uintptr(n), + 1, uintptr(ymat.Inc), + 0, 0, + ) + } + } + } + return sum + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + for j := 0; j < y.Len(); j++ { + sum += xi * a.At(i, j) * y.AtVec(j) + } + } + return sum +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/io.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/io.go new file mode 100644 index 00000000..1111e4a4 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/io.go @@ -0,0 +1,492 @@ +// Copyright 
©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +// version is the current on-disk codec version. +const version uint32 = 0x1 + +// maxLen is the biggest slice/array len one can create on a 32/64b platform. +const maxLen = int64(int(^uint(0) >> 1)) + +var ( + headerSize = binary.Size(storage{}) + sizeInt64 = binary.Size(int64(0)) + sizeFloat64 = binary.Size(float64(0)) + + errWrongType = errors.New("mat: wrong data type") + + errTooBig = errors.New("mat: resulting data slice too big") + errTooSmall = errors.New("mat: input slice too small") + errBadBuffer = errors.New("mat: data buffer size mismatch") + errBadSize = errors.New("mat: invalid dimension") +) + +// Type encoding scheme: +// +// Type Form Packing Uplo Unit Rows Columns kU kL +// uint8 [GST] uint8 [BPF] uint8 [AUL] bool int64 int64 int64 int64 +// General 'G' 'F' 'A' false r c 0 0 +// Band 'G' 'B' 'A' false r c kU kL +// Symmetric 'S' 'F' ul false n n 0 0 +// SymmetricBand 'S' 'B' ul false n n k k +// SymmetricPacked 'S' 'P' ul false n n 0 0 +// Triangular 'T' 'F' ul Diag==Unit n n 0 0 +// TriangularBand 'T' 'B' ul Diag==Unit n n k k +// TriangularPacked 'T' 'P' ul Diag==Unit n n 0 0 +// +// G - general, S - symmetric, T - triangular +// F - full, B - band, P - packed +// A - all, U - upper, L - lower + +// MarshalBinary encodes the receiver into a binary form and returns the result. +// +// Dense is little-endian encoded as follows: +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of rows (int64) +// 16 - 23 number of columns (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. matrix data elements (float64) +// [0,0] [0,1] ... [0,ncols-1] +// [1,0] [1,1] ... [1,ncols-1] +// ... +// [nrows-1,0] ... [nrows-1,ncols-1] +func (m Dense) MarshalBinary() ([]byte, error) { + bufLen := int64(headerSize) + int64(m.mat.Rows)*int64(m.mat.Cols)*int64(sizeFloat64) + if bufLen <= 0 { + // bufLen is too big and has wrapped around. + return nil, errTooBig + } + + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), + Version: version, + } + buf := make([]byte, bufLen) + n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0])) + if err != nil { + return buf[:n], err + } + + p := headerSize + r, c := m.Dims() + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(m.at(i, j))) + p += sizeFloat64 + } + } + + return buf, nil +} + +// MarshalBinaryTo encodes the receiver into a binary form and writes it into w. +// MarshalBinaryTo returns the number of bytes written into w and an error, if any. +// +// See MarshalBinary for the on-disk layout. +func (m Dense) MarshalBinaryTo(w io.Writer) (int, error) { + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), + Version: version, + } + n, err := header.marshalBinaryTo(w) + if err != nil { + return n, err + } + + r, c := m.Dims() + var b [8]byte + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + binary.LittleEndian.PutUint64(b[:], math.Float64bits(m.at(i, j))) + nn, err := w.Write(b[:]) + n += nn + if err != nil { + return n, err + } + } + } + + return n, nil +} + +// UnmarshalBinary decodes the binary form into the receiver. 
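A round-trip sketch for the Dense codec above. UnmarshalBinary, described next, panics on a non-zero receiver, so decoding goes into a fresh value:

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	a := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})

	buf, err := a.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(buf)) // 40-byte header + 6 elements × 8 bytes = 88

	var b mat.Dense // must be zero-valued
	if err := b.UnmarshalBinary(buf); err != nil {
		panic(err)
	}
	fmt.Println(b.At(1, 2)) // 6, exactly: float64 bits round-trip losslessly
}
```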
+// It panics if the receiver is a non-zero Dense matrix. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. a 16GB matrix written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so +// it should not be used on untrusted data. +func (m *Dense) UnmarshalBinary(data []byte) error { + if !m.IsZero() { + panic("mat: unmarshal into non-zero matrix") + } + + if len(data) < headerSize { + return errTooSmall + } + + var header storage + err := header.unmarshalBinary(data[:headerSize]) + if err != nil { + return err + } + rows := header.Rows + cols := header.Cols + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return errWrongType + } + if rows < 0 || cols < 0 { + return errBadSize + } + size := rows * cols + if size == 0 { + return ErrZeroLength + } + if int(size) < 0 || size > maxLen { + return errTooBig + } + if len(data) != headerSize+int(rows*cols)*sizeFloat64 { + return errBadBuffer + } + + p := headerSize + m.reuseAs(int(rows), int(cols)) + for i := range m.mat.Data { + m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64])) + p += sizeFloat64 + } + + return nil +} + +// UnmarshalBinaryFrom decodes the binary form into the receiver and returns +// the number of bytes read and an error if any. +// It panics if the receiver is a non-zero Dense matrix. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. a 16GB matrix written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so +// it should not be used on untrusted data. +func (m *Dense) UnmarshalBinaryFrom(r io.Reader) (int, error) { + if !m.IsZero() { + panic("mat: unmarshal into non-zero matrix") + } + + var header storage + n, err := header.unmarshalBinaryFrom(r) + if err != nil { + return n, err + } + rows := header.Rows + cols := header.Cols + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return n, errWrongType + } + if rows < 0 || cols < 0 { + return n, errBadSize + } + size := rows * cols + if size == 0 { + return n, ErrZeroLength + } + if int(size) < 0 || size > maxLen { + return n, errTooBig + } + + m.reuseAs(int(rows), int(cols)) + var b [8]byte + for i := range m.mat.Data { + nn, err := readFull(r, b[:]) + n += nn + if err != nil { + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err + } + m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) + } + + return n, nil +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +// +// VecDense is little-endian encoded as follows: +// +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of elements (int64) +// 16 - 23 1 (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. 
vector's data elements (float64) +func (v VecDense) MarshalBinary() ([]byte, error) { + bufLen := int64(headerSize) + int64(v.n)*int64(sizeFloat64) + if bufLen <= 0 { + // bufLen is too big and has wrapped around. + return nil, errTooBig + } + + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(v.n), Cols: 1, + Version: version, + } + buf := make([]byte, bufLen) + n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0])) + if err != nil { + return buf[:n], err + } + + p := headerSize + for i := 0; i < v.n; i++ { + binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(v.at(i))) + p += sizeFloat64 + } + + return buf, nil +} + +// MarshalBinaryTo encodes the receiver into a binary form, writes it to w and +// returns the number of bytes written and an error if any. +// +// See MarshalBainry for the on-disk format. +func (v VecDense) MarshalBinaryTo(w io.Writer) (int, error) { + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(v.n), Cols: 1, + Version: version, + } + n, err := header.marshalBinaryTo(w) + if err != nil { + return n, err + } + + var buf [8]byte + for i := 0; i < v.n; i++ { + binary.LittleEndian.PutUint64(buf[:], math.Float64bits(v.at(i))) + nn, err := w.Write(buf[:]) + n += nn + if err != nil { + return n, err + } + } + + return n, nil +} + +// UnmarshalBinary decodes the binary form into the receiver. +// It panics if the receiver is a non-zero VecDense. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows is negative, +// - an error is returned if the resulting VecDense is too +// big for the current architecture (e.g. a 16GB vector written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled vector, and so +// it should not be used on untrusted data. +func (v *VecDense) UnmarshalBinary(data []byte) error { + if !v.IsZero() { + panic("mat: unmarshal into non-zero vector") + } + + if len(data) < headerSize { + return errTooSmall + } + + var header storage + err := header.unmarshalBinary(data[:headerSize]) + if err != nil { + return err + } + if header.Cols != 1 { + return ErrShape + } + n := header.Rows + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return errWrongType + } + if n == 0 { + return ErrZeroLength + } + if n < 0 { + return errBadSize + } + if int64(maxLen) < n { + return errTooBig + } + if len(data) != headerSize+int(n)*sizeFloat64 { + return errBadBuffer + } + + p := headerSize + v.reuseAs(int(n)) + for i := range v.mat.Data { + v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64])) + p += sizeFloat64 + } + + return nil +} + +// UnmarshalBinaryFrom decodes the binary form into the receiver, from the +// io.Reader and returns the number of bytes read and an error if any. +// It panics if the receiver is a non-zero VecDense. +// +// See MarshalBinary for the on-disk layout. +// See UnmarshalBinary for the list of sanity checks performed on the input. 
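The MarshalBinaryTo/UnmarshalBinaryFrom pair streams a vector without materializing the whole encoding in a slice. A sketch with an in-memory buffer standing in for the stream (NewVecDense is the package's usual constructor, defined elsewhere in this vendored tree):

```go
package main

import (
	"bytes"
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	var stream bytes.Buffer

	v := mat.NewVecDense(3, []float64{1, 2, 3})
	if _, err := v.MarshalBinaryTo(&stream); err != nil {
		panic(err)
	}

	var w mat.VecDense // zero-valued receiver, as required
	if _, err := w.UnmarshalBinaryFrom(&stream); err != nil {
		panic(err)
	}
	fmt.Println(w.AtVec(2)) // 3
}
```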
+func (v *VecDense) UnmarshalBinaryFrom(r io.Reader) (int, error) { + if !v.IsZero() { + panic("mat: unmarshal into non-zero vector") + } + + var header storage + n, err := header.unmarshalBinaryFrom(r) + if err != nil { + return n, err + } + if header.Cols != 1 { + return n, ErrShape + } + l := header.Rows + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return n, errWrongType + } + if l == 0 { + return n, ErrZeroLength + } + if l < 0 { + return n, errBadSize + } + if int64(maxLen) < l { + return n, errTooBig + } + + v.reuseAs(int(l)) + var b [8]byte + for i := range v.mat.Data { + nn, err := readFull(r, b[:]) + n += nn + if err != nil { + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err + } + v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) + } + + return n, nil +} + +// storage is the internal representation of the storage format of a +// serialised matrix. +type storage struct { + Version uint32 // Keep this first. + Form byte // [GST] + Packing byte // [BPF] + Uplo byte // [AUL] + Unit bool + Rows int64 + Cols int64 + KU int64 + KL int64 +} + +// TODO(kortschak): Consider replacing these with calls to direct +// encoding/decoding of fields rather than to binary.Write/binary.Read. + +func (s storage) marshalBinaryTo(w io.Writer) (int, error) { + buf := bytes.NewBuffer(make([]byte, 0, headerSize)) + err := binary.Write(buf, binary.LittleEndian, s) + if err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +func (s *storage) unmarshalBinary(buf []byte) error { + err := binary.Read(bytes.NewReader(buf), binary.LittleEndian, s) + if err != nil { + return err + } + if s.Version != version { + return fmt.Errorf("mat: incorrect version: %d", s.Version) + } + return nil +} + +func (s *storage) unmarshalBinaryFrom(r io.Reader) (int, error) { + buf := make([]byte, headerSize) + n, err := readFull(r, buf) + if err != nil { + return n, err + } + return n, s.unmarshalBinary(buf[:n]) +} + +// readFull reads from r into buf until it has read len(buf). +// It returns the number of bytes copied and an error if fewer bytes were read. +// If an EOF happens after reading fewer than len(buf) bytes, io.ErrUnexpectedEOF is returned. +func readFull(r io.Reader, buf []byte) (int, error) { + var n int + var err error + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n == len(buf) { + return n, nil + } + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/lq.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/lq.go new file mode 100644 index 00000000..741a1692 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/lq.go @@ -0,0 +1,235 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// LQ is a type for creating and using the LQ factorization of a matrix. 
+type LQ struct { + lq *Dense + tau []float64 + cond float64 +} + +func (lq *LQ) updateCond(norm lapack.MatrixNorm) { + // Since A = L*Q, and Q is orthogonal, we get for the condition number κ + // κ(A) := |A| |A^-1| = |L*Q| |(L*Q)^-1| = |L| |Q^T * L^-1| + // = |L| |L^-1| = κ(L), + // where we used that fact that Q^-1 = Q^T. However, this assumes that + // the matrix norm is invariant under orthogonal transformations which + // is not the case for CondNorm. Hopefully the error is negligible: κ + // is only a qualitative measure anyway. + m := lq.lq.mat.Rows + work := getFloats(3*m, false) + iwork := getInts(m, false) + l := lq.lq.asTriDense(m, blas.NonUnit, blas.Lower) + v := lapack64.Trcon(norm, l.mat, work, iwork) + lq.cond = 1 / v + putFloats(work) + putInts(iwork) +} + +// Factorize computes the LQ factorization of an m×n matrix a where n <= m. The LQ +// factorization always exists even if A is singular. +// +// The LQ decomposition is a factorization of the matrix A such that A = L * Q. +// The matrix Q is an orthonormal n×n matrix, and L is an m×n upper triangular matrix. +// L and Q can be extracted from the LTo and QTo methods. +func (lq *LQ) Factorize(a Matrix) { + lq.factorize(a, CondNorm) +} + +func (lq *LQ) factorize(a Matrix, norm lapack.MatrixNorm) { + m, n := a.Dims() + if m > n { + panic(ErrShape) + } + k := min(m, n) + if lq.lq == nil { + lq.lq = &Dense{} + } + lq.lq.Clone(a) + work := []float64{0} + lq.tau = make([]float64, k) + lapack64.Gelqf(lq.lq.mat, lq.tau, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Gelqf(lq.lq.mat, lq.tau, work, len(work)) + putFloats(work) + lq.updateCond(norm) +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a successful factorization. +func (lq *LQ) Cond() float64 { + if lq.lq == nil || lq.lq.IsZero() { + panic("lq: no decomposition computed") + } + return lq.cond +} + +// TODO(btracey): Add in the "Reduced" forms for extracting the m×m orthogonal +// and upper triangular matrices. + +// LTo extracts the m×n lower trapezoidal matrix from a LQ decomposition. +// If dst is nil, a new matrix is allocated. The resulting L matrix is returned. +func (lq *LQ) LTo(dst *Dense) *Dense { + r, c := lq.lq.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + // Disguise the LQ as a lower triangular. + t := &TriDense{ + mat: blas64.Triangular{ + N: r, + Stride: lq.lq.mat.Stride, + Data: lq.lq.mat.Data, + Uplo: blas.Lower, + Diag: blas.NonUnit, + }, + cap: lq.lq.capCols, + } + dst.Copy(t) + + if r == c { + return dst + } + // Zero right of the triangular. + for i := 0; i < r; i++ { + zero(dst.mat.Data[i*dst.mat.Stride+r : i*dst.mat.Stride+c]) + } + + return dst +} + +// QTo extracts the n×n orthonormal matrix Q from an LQ decomposition. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +func (lq *LQ) QTo(dst *Dense) *Dense { + _, c := lq.lq.Dims() + if dst == nil { + dst = NewDense(c, c, nil) + } else { + dst.reuseAsZeroed(c, c) + } + q := dst.mat + + // Set Q = I. + ldq := q.Stride + for i := 0; i < c; i++ { + q.Data[i*ldq+i] = 1 + } + + // Construct Q from the elementary reflectors. 
+ work := []float64{0} + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, len(work)) + putFloats(work) + + return dst +} + +// Solve finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and b, where A is an m×n matrix represented in its LQ factorized +// form. If A is singular or near-singular a Condition error is returned. +// See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters. +// If trans == false, find the minimum norm solution of A * X = B. +// If trans == true, find X such that ||A*X - B||_2 is minimized. +// The solution matrix, X, is stored in place into x. +func (lq *LQ) Solve(x *Dense, trans bool, b Matrix) error { + r, c := lq.lq.Dims() + br, bc := b.Dims() + + // The LQ solve algorithm stores the result in-place into the right hand side. + // The storage for the answer must be large enough to hold both b and x. + // However, this method's receiver must be the size of x. Copy b, and then + // copy the result into x at the end. + if trans { + if c != br { + panic(ErrShape) + } + x.reuseAs(r, bc) + } else { + if r != br { + panic(ErrShape) + } + x.reuseAs(c, bc) + } + // Do not need to worry about overlap between x and b because w has its own + // independent storage. + w := getWorkspace(max(r, c), bc, false) + w.Copy(b) + t := lq.lq.asTriDense(lq.lq.mat.Rows, blas.NonUnit, blas.Lower).mat + if trans { + work := []float64{0} + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, len(work)) + putFloats(work) + + ok := lapack64.Trtrs(blas.Trans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + } else { + ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + for i := r; i < c; i++ { + zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) + } + work := []float64{0} + lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, len(work)) + putFloats(work) + } + // x was set above to be the correct size for the result. + x.Copy(w) + putWorkspace(w) + if lq.cond > ConditionTolerance { + return Condition(lq.cond) + } + return nil +} + +// SolveVec finds a minimum-norm solution to a system of linear equations. +// See LQ.Solve for the full documentation. +func (lq *LQ) SolveVec(x *VecDense, trans bool, b Vector) error { + r, c := lq.lq.Dims() + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. 
+ bm := Matrix(b) + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if x != b { + x.checkOverlap(bmat) + } + b := VecDense{mat: bmat, n: b.Len()} + bm = b.asDense() + } + if trans { + x.reuseAs(r) + } else { + x.reuseAs(c) + } + return lq.Solve(x.asDense(), trans, bm) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/lu.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/lu.go new file mode 100644 index 00000000..055ae6cb --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/lu.go @@ -0,0 +1,378 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const badSliceLength = "mat: improper slice length" + +// LU is a type for creating and using the LU factorization of a matrix. +type LU struct { + lu *Dense + pivot []int + cond float64 +} + +// updateCond updates the stored condition number of the matrix. anorm is the +// norm of the original matrix. If anorm is negative it will be estimated. +func (lu *LU) updateCond(anorm float64, norm lapack.MatrixNorm) { + n := lu.lu.mat.Cols + work := getFloats(4*n, false) + defer putFloats(work) + iwork := getInts(n, false) + defer putInts(iwork) + if anorm < 0 { + // This is an approximation. By the definition of a norm, + // |AB| <= |A| |B|. + // Since A = L*U, we get for the condition number κ that + // κ(A) := |A| |A^-1| = |L*U| |A^-1| <= |L| |U| |A^-1|, + // so this will overestimate the condition number somewhat. + // The norm of the original factorized matrix cannot be stored + // because of update possibilities. + u := lu.lu.asTriDense(n, blas.NonUnit, blas.Upper) + l := lu.lu.asTriDense(n, blas.Unit, blas.Lower) + unorm := lapack64.Lantr(norm, u.mat, work) + lnorm := lapack64.Lantr(norm, l.mat, work) + anorm = unorm * lnorm + } + v := lapack64.Gecon(norm, lu.lu.mat, anorm, work, iwork) + lu.cond = 1 / v +} + +// Factorize computes the LU factorization of the square matrix a and stores the +// result. The LU decomposition will complete regardless of the singularity of a. +// +// The LU factorization is computed with pivoting, and so really the decomposition +// is a PLU decomposition where P is a permutation matrix. The individual matrix +// factors can be extracted from the factorization using the Permutation method +// on Dense, and the LU LTo and UTo methods. +func (lu *LU) Factorize(a Matrix) { + lu.factorize(a, CondNorm) +} + +func (lu *LU) factorize(a Matrix, norm lapack.MatrixNorm) { + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + if lu.lu == nil { + lu.lu = NewDense(r, r, nil) + } else { + lu.lu.Reset() + lu.lu.reuseAs(r, r) + } + lu.lu.Copy(a) + if cap(lu.pivot) < r { + lu.pivot = make([]int, r) + } + lu.pivot = lu.pivot[:r] + work := getFloats(r, false) + anorm := lapack64.Lange(norm, lu.lu.mat, work) + putFloats(work) + lapack64.Getrf(lu.lu.mat, lu.pivot) + lu.updateCond(anorm, norm) +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a successful factorization. 
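Before leaving LQ: a minimum-norm solve sketch for the Solve/SolveVec API just above, on an underdetermined 2×3 system (Factorize requires m <= n):

```go
// Inside a main that imports fmt and mat, as in the earlier sketches.
a := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})
b := mat.NewVecDense(2, []float64{7, 8})

var lq mat.LQ
lq.Factorize(a)

var x mat.VecDense
if err := lq.SolveVec(&x, false, b); err != nil {
	panic(err)
}
fmt.Println(x.RawVector().Data) // the minimum-norm x with A*x = b, length 3
```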
+func (lu *LU) Cond() float64 { + if lu.lu == nil || lu.lu.IsZero() { + panic("lu: no decomposition computed") + } + return lu.cond +} + +// Reset resets the factorization so that it can be reused as the receiver of a +// dimensionally restricted operation. +func (lu *LU) Reset() { + if lu.lu != nil { + lu.lu.Reset() + } + lu.pivot = lu.pivot[:0] +} + +func (lu *LU) isZero() bool { + return len(lu.pivot) == 0 +} + +// Det returns the determinant of the matrix that has been factorized. In many +// expressions, using LogDet will be more numerically stable. +func (lu *LU) Det() float64 { + det, sign := lu.LogDet() + return math.Exp(det) * sign +} + +// LogDet returns the log of the determinant and the sign of the determinant +// for the matrix that has been factorized. Numerical stability in product and +// division expressions is generally improved by working in log space. +func (lu *LU) LogDet() (det float64, sign float64) { + _, n := lu.lu.Dims() + logDiag := getFloats(n, false) + defer putFloats(logDiag) + sign = 1.0 + for i := 0; i < n; i++ { + v := lu.lu.at(i, i) + if v < 0 { + sign *= -1 + } + if lu.pivot[i] != i { + sign *= -1 + } + logDiag[i] = math.Log(math.Abs(v)) + } + return floats.Sum(logDiag), sign +} + +// Pivot returns pivot indices that enable the construction of the permutation +// matrix P (see Dense.Permutation). If swaps == nil, then new memory will be +// allocated, otherwise the length of the input must be equal to the size of the +// factorized matrix. +func (lu *LU) Pivot(swaps []int) []int { + _, n := lu.lu.Dims() + if swaps == nil { + swaps = make([]int, n) + } + if len(swaps) != n { + panic(badSliceLength) + } + // Perform the inverse of the row swaps in order to find the final + // row swap position. + for i := range swaps { + swaps[i] = i + } + for i := n - 1; i >= 0; i-- { + v := lu.pivot[i] + swaps[i], swaps[v] = swaps[v], swaps[i] + } + return swaps +} + +// RankOne updates an LU factorization as if a rank-one update had been applied to +// the original matrix A, storing the result into the receiver. That is, if in +// the original LU decomposition P * L * U = A, in the updated decomposition +// P * L * U = A + alpha * x * y^T. +func (lu *LU) RankOne(orig *LU, alpha float64, x, y Vector) { + // RankOne uses algorithm a1 on page 28 of "Multiple-Rank Updates to Matrix + // Factorizations for Nonlinear Analysis and Circuit Design" by Linzhong Deng. 
+ // http://web.stanford.edu/group/SOL/dissertations/Linzhong-Deng-thesis.pdf + _, n := orig.lu.Dims() + if r, c := x.Dims(); r != n || c != 1 { + panic(ErrShape) + } + if r, c := y.Dims(); r != n || c != 1 { + panic(ErrShape) + } + if orig != lu { + if lu.isZero() { + if cap(lu.pivot) < n { + lu.pivot = make([]int, n) + } + lu.pivot = lu.pivot[:n] + if lu.lu == nil { + lu.lu = NewDense(n, n, nil) + } else { + lu.lu.reuseAs(n, n) + } + } else if len(lu.pivot) != n { + panic(ErrShape) + } + copy(lu.pivot, orig.pivot) + lu.lu.Copy(orig.lu) + } + + xs := getFloats(n, false) + defer putFloats(xs) + ys := getFloats(n, false) + defer putFloats(ys) + for i := 0; i < n; i++ { + xs[i] = x.AtVec(i) + ys[i] = y.AtVec(i) + } + + // Adjust for the pivoting in the LU factorization + for i, v := range lu.pivot { + xs[i], xs[v] = xs[v], xs[i] + } + + lum := lu.lu.mat + omega := alpha + for j := 0; j < n; j++ { + ujj := lum.Data[j*lum.Stride+j] + ys[j] /= ujj + theta := 1 + xs[j]*ys[j]*omega + beta := omega * ys[j] / theta + gamma := omega * xs[j] + omega -= beta * gamma + lum.Data[j*lum.Stride+j] *= theta + for i := j + 1; i < n; i++ { + xs[i] -= lum.Data[i*lum.Stride+j] * xs[j] + tmp := ys[i] + ys[i] -= lum.Data[j*lum.Stride+i] * ys[j] + lum.Data[i*lum.Stride+j] += beta * xs[i] + lum.Data[j*lum.Stride+i] += gamma * tmp + } + } + lu.updateCond(-1, CondNorm) +} + +// LTo extracts the lower triangular matrix from an LU factorization. +// If dst is nil, a new matrix is allocated. The resulting L matrix is returned. +func (lu *LU) LTo(dst *TriDense) *TriDense { + _, n := lu.lu.Dims() + if dst == nil { + dst = NewTriDense(n, Lower, nil) + } else { + dst.reuseAs(n, Lower) + } + // Extract the lower triangular elements. + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] + } + } + // Set ones on the diagonal. + for i := 0; i < n; i++ { + dst.mat.Data[i*dst.mat.Stride+i] = 1 + } + return dst +} + +// UTo extracts the upper triangular matrix from an LU factorization. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +func (lu *LU) UTo(dst *TriDense) *TriDense { + _, n := lu.lu.Dims() + if dst == nil { + dst = NewTriDense(n, Upper, nil) + } else { + dst.reuseAs(n, Upper) + } + // Extract the upper triangular elements. + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] + } + } + return dst +} + +// Permutation constructs an r×r permutation matrix with the given row swaps. +// A permutation matrix has exactly one element equal to one in each row and column +// and all other elements equal to zero. swaps[i] specifies the row with which +// i will be swapped, which is equivalent to the non-zero column of row i. +func (m *Dense) Permutation(r int, swaps []int) { + m.reuseAs(r, r) + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+r]) + v := swaps[i] + if v < 0 || v >= r { + panic(ErrRowAccess) + } + m.mat.Data[i*m.mat.Stride+v] = 1 + } +} + +// Solve solves a system of linear equations using the LU decomposition of a matrix. +// It computes +// A * X = B if trans == false +// A^T * X = B if trans == true +// In both cases, A is represented in LU factorized form, and the matrix X is +// stored into x. +// +// If A is singular or near-singular a Condition error is returned. See +// the documentation for Condition for more information. 
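A sketch of the LU workflow described above: factorize once, then reuse the decomposition for the determinant and a linear solve.

```go
// Inside a main that imports fmt and mat.
a := mat.NewDense(2, 2, []float64{4, 3, 6, 3})

var lu mat.LU
lu.Factorize(a)
fmt.Println(lu.Det()) // ≈ -6 (4*3 - 3*6), computed via LogDet

b := mat.NewVecDense(2, []float64{10, 12})
var x mat.VecDense
if err := lu.SolveVec(&x, false, b); err != nil {
	panic(err)
}
fmt.Println(x.AtVec(0), x.AtVec(1)) // ≈ 1 2
```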
+func (lu *LU) Solve(x *Dense, trans bool, b Matrix) error { + _, n := lu.lu.Dims() + br, bc := b.Dims() + if br != n { + panic(ErrShape) + } + // TODO(btracey): Should test the condition number instead of testing that + // the determinant is exactly zero. + if lu.Det() == 0 { + return Condition(math.Inf(1)) + } + + x.reuseAs(n, bc) + bU, _ := untranspose(b) + var restore func() + if x == bU { + x, restore = x.isolatedWorkspace(bU) + defer restore() + } else if rm, ok := bU.(RawMatrixer); ok { + x.checkOverlap(rm.RawMatrix()) + } + + x.Copy(b) + t := blas.NoTrans + if trans { + t = blas.Trans + } + lapack64.Getrs(t, lu.lu.mat, x.mat, lu.pivot) + if lu.cond > ConditionTolerance { + return Condition(lu.cond) + } + return nil +} + +// SolveVec solves a system of linear equations using the LU decomposition of a matrix. +// It computes +// A * x = b if trans == false +// A^T * x = b if trans == true +// In both cases, A is represented in LU factorized form, and the vector x is +// stored into x. +// +// If A is singular or near-singular a Condition error is returned. See +// the documentation for Condition for more information. +func (lu *LU) SolveVec(x *VecDense, trans bool, b Vector) error { + _, n := lu.lu.Dims() + if br, bc := b.Dims(); br != n || bc != 1 { + panic(ErrShape) + } + switch rv := b.(type) { + default: + x.reuseAs(n) + return lu.Solve(x.asDense(), trans, b) + case RawVectorer: + if x != b { + x.checkOverlap(rv.RawVector()) + } + // TODO(btracey): Should test the condition number instead of testing that + // the determinant is exactly zero. + if lu.Det() == 0 { + return Condition(math.Inf(1)) + } + + x.reuseAs(n) + var restore func() + if x == b { + x, restore = x.isolatedWorkspace(b) + defer restore() + } + x.CopyVec(b) + vMat := blas64.General{ + Rows: n, + Cols: 1, + Stride: x.mat.Inc, + Data: x.mat.Data, + } + t := blas.NoTrans + if trans { + t = blas.Trans + } + lapack64.Getrs(t, lu.lu.mat, vMat, lu.pivot) + if lu.cond > ConditionTolerance { + return Condition(lu.cond) + } + return nil + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/matrix.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/matrix.go new file mode 100644 index 00000000..59f94583 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/matrix.go @@ -0,0 +1,890 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Matrix is the basic matrix interface type. +type Matrix interface { + // Dims returns the dimensions of a Matrix. + Dims() (r, c int) + + // At returns the value of a matrix element at row i, column j. + // It will panic if i or j are out of bounds for the matrix. + At(i, j int) float64 + + // T returns the transpose of the Matrix. Whether T returns a copy of the + // underlying data is implementation dependent. + // This method may be implemented using the Transpose type, which + // provides an implicit matrix transpose. + T() Matrix +} + +var ( + _ Matrix = Transpose{} + _ Untransposer = Transpose{} +) + +// Transpose is a type for performing an implicit matrix transpose. It implements +// the Matrix interface, returning values from the transpose of the matrix within. 
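Because Transpose only swaps indices on access, taking a transpose is free; no data is copied. A small sketch:

```go
// Inside a main that imports fmt and mat.
a := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})
t := a.T() // a Transpose view; the backing data is shared with a

r, c := t.Dims()
fmt.Println(r, c)       // 3 2
fmt.Println(t.At(2, 0)) // a.At(0, 2) == 3
```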
+type Transpose struct { + Matrix Matrix +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Matrix field. +func (t Transpose) At(i, j int) float64 { + return t.Matrix.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. The number of rows returned +// is the number of columns in the Matrix field, and the number of columns is +// the number of rows in the Matrix field. +func (t Transpose) Dims() (r, c int) { + c, r = t.Matrix.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Matrix field. +func (t Transpose) T() Matrix { + return t.Matrix +} + +// Untranspose returns the Matrix field. +func (t Transpose) Untranspose() Matrix { + return t.Matrix +} + +// Untransposer is a type that can undo an implicit transpose. +type Untransposer interface { + // Note: This interface is needed to unify all of the Transpose types. In + // the mat methods, we need to test if the Matrix has been implicitly + // transposed. If this is checked by testing for the specific Transpose type + // then the behavior will be different if the user uses T() or TTri() for a + // triangular matrix. + + // Untranspose returns the underlying Matrix stored for the implicit transpose. + Untranspose() Matrix +} + +// UntransposeBander is a type that can undo an implicit band transpose. +type UntransposeBander interface { + // Untranspose returns the underlying Banded stored for the implicit transpose. + UntransposeBand() Banded +} + +// UntransposeTrier is a type that can undo an implicit triangular transpose. +type UntransposeTrier interface { + // Untranspose returns the underlying Triangular stored for the implicit transpose. + UntransposeTri() Triangular +} + +// Mutable is a matrix interface type that allows elements to be altered. +type Mutable interface { + // Set alters the matrix element at row i, column j to v. + // It will panic if i or j are out of bounds for the matrix. + Set(i, j int, v float64) + + Matrix +} + +// A RowViewer can return a Vector reflecting a row that is backed by the matrix +// data. The Vector returned will have length equal to the number of columns. +type RowViewer interface { + RowView(i int) Vector +} + +// A RawRowViewer can return a slice of float64 reflecting a row that is backed by the matrix +// data. +type RawRowViewer interface { + RawRowView(i int) []float64 +} + +// A ColViewer can return a Vector reflecting a column that is backed by the matrix +// data. The Vector returned will have length equal to the number of rows. +type ColViewer interface { + ColView(j int) Vector +} + +// A RawColViewer can return a slice of float64 reflecting a column that is backed by the matrix +// data. +type RawColViewer interface { + RawColView(j int) []float64 +} + +// A Cloner can make a copy of a into the receiver, overwriting the previous value of the +// receiver. The clone operation does not make any restriction on shape and will not cause +// shadowing. +type Cloner interface { + Clone(a Matrix) +} + +// A Reseter can reset the matrix so that it can be reused as the receiver of a dimensionally +// restricted operation. This is commonly used when the matrix is being used as a workspace +// or temporary matrix. +// +// If the matrix is a view, using the reset matrix may result in data corruption in elements +// outside the view. +type Reseter interface { + Reset() +} + +// A Copier can make a copy of elements of a into the receiver. 
The submatrix copied +// starts at row and column 0 and has dimensions equal to the minimum dimensions of +// the two matrices. The number of rows and columns copied is returned. +// Copy will copy from a source that aliases the receiver unless the source is transposed; +// an aliasing transpose copy will panic except for the special case when +// the source data has a unitary increment or stride. +type Copier interface { + Copy(a Matrix) (r, c int) +} + +// A Grower can grow the size of the represented matrix by the given number of rows and columns. +// Growing beyond the size given by the Caps method will result in the allocation of a new +// matrix and copying of the elements. If Grow is called with negative increments it will +// panic with ErrIndexOutOfRange. +type Grower interface { + Caps() (r, c int) + Grow(r, c int) Matrix +} + +// A BandWidther represents a banded matrix and can return the left and right half-bandwidths, k1 and +// k2. +type BandWidther interface { + BandWidth() (k1, k2 int) +} + +// A RawMatrixSetter can set the underlying blas64.General used by the receiver. There is no restriction +// on the shape of the receiver. Changes to the receiver's elements will be reflected in the blas64.General.Data. +type RawMatrixSetter interface { + SetRawMatrix(a blas64.General) +} + +// A RawMatrixer can return a blas64.General representation of the receiver. Changes to the blas64.General.Data +// slice will be reflected in the original matrix, changes to the Rows, Cols and Stride fields will not. +type RawMatrixer interface { + RawMatrix() blas64.General +} + +// A RawVectorer can return a blas64.Vector representation of the receiver. Changes to the blas64.Vector.Data +// slice will be reflected in the original matrix, changes to the Inc field will not. +type RawVectorer interface { + RawVector() blas64.Vector +} + +// A NonZeroDoer can call a function for each non-zero element of the receiver. +// The parameters of the function are the element indices and its value. +type NonZeroDoer interface { + DoNonZero(func(i, j int, v float64)) +} + +// A RowNonZeroDoer can call a function for each non-zero element of a row of the receiver. +// The parameters of the function are the element indices and its value. +type RowNonZeroDoer interface { + DoRowNonZero(i int, fn func(i, j int, v float64)) +} + +// A ColNonZeroDoer can call a function for each non-zero element of a column of the receiver. +// The parameters of the function are the element indices and its value. +type ColNonZeroDoer interface { + DoColNonZero(j int, fn func(i, j int, v float64)) +} + +// TODO(btracey): Consider adding CopyCol/CopyRow if the behavior seems useful. +// TODO(btracey): Add in fast paths to Row/Col for the other concrete types +// (TriDense, etc.) as well as relevant interfaces (RowColer, RawRowViewer, etc.) + +// Col copies the elements in the jth column of the matrix into the slice dst. +// The length of the provided slice must equal the number of rows, unless the +// slice is nil in which case a new slice is first allocated.
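+// +// For example: +//  a := NewDense(2, 2, []float64{1, 2, 3, 4}) +//  col := Col(nil, 1, a) // col == []float64{2, 4}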
+func Col(dst []float64, j int, a Matrix) []float64 { + r, c := a.Dims() + if j < 0 || j >= c { + panic(ErrColAccess) + } + if dst == nil { + dst = make([]float64, r) + } else { + if len(dst) != r { + panic(ErrColLength) + } + } + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m := rm.RawMatrix() + if aTrans { + copy(dst, m.Data[j*m.Stride:j*m.Stride+m.Cols]) + return dst + } + blas64.Copy(r, + blas64.Vector{Inc: m.Stride, Data: m.Data[j:]}, + blas64.Vector{Inc: 1, Data: dst}, + ) + return dst + } + for i := 0; i < r; i++ { + dst[i] = a.At(i, j) + } + return dst +} + +// Row copies the elements in the ith row of the matrix into the slice dst. +// The length of the provided slice must equal the number of columns, unless the +// slice is nil in which case a new slice is first allocated. +func Row(dst []float64, i int, a Matrix) []float64 { + r, c := a.Dims() + if i < 0 || i >= r { + panic(ErrRowAccess) + } + if dst == nil { + dst = make([]float64, c) + } else { + if len(dst) != c { + panic(ErrRowLength) + } + } + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m := rm.RawMatrix() + if aTrans { + blas64.Copy(c, + blas64.Vector{Inc: m.Stride, Data: m.Data[i:]}, + blas64.Vector{Inc: 1, Data: dst}, + ) + return dst + } + copy(dst, m.Data[i*m.Stride:i*m.Stride+m.Cols]) + return dst + } + for j := 0; j < c; j++ { + dst[j] = a.At(i, j) + } + return dst +} + +// Cond returns the condition number of the given matrix under the given norm. +// The condition number must be based on the 1-norm, 2-norm or ∞-norm. +// Cond will panic with ErrShape if the matrix has zero size. +// +// BUG(btracey): The computation of the 1-norm and ∞-norm for non-square matrices +// is inaccurate, although it is typically the right order of magnitude. See +// https://github.com/xianyi/OpenBLAS/issues/636. While the value returned will +// change with the resolution of this bug, the result from Cond will match the +// condition number used internally. +func Cond(a Matrix, norm float64) float64 { + m, n := a.Dims() + if m == 0 || n == 0 { + panic(ErrShape) + } + var lnorm lapack.MatrixNorm + switch norm { + default: + panic("mat: bad norm value") + case 1: + lnorm = lapack.MaxColumnSum + case 2: + var svd SVD + ok := svd.Factorize(a, SVDNone) + if !ok { + return math.Inf(1) + } + return svd.Cond() + case math.Inf(1): + lnorm = lapack.MaxRowSum + } + + if m == n { + // Use the LU decomposition to compute the condition number. + var lu LU + lu.factorize(a, lnorm) + return lu.Cond() + } + if m > n { + // Use the QR factorization to compute the condition number. + var qr QR + qr.factorize(a, lnorm) + return qr.Cond() + } + // Use the LQ factorization to compute the condition number. + var lq LQ + lq.factorize(a, lnorm) + return lq.Cond() +} + +// Det returns the determinant of the matrix a. In many expressions using LogDet +// will be more numerically stable. +func Det(a Matrix) float64 { + det, sign := LogDet(a) + return math.Exp(det) * sign +} + +// Dot returns the sum of the element-wise product of a and b. +// Dot panics if the vector lengths are unequal.
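+// +// A small sketch, assuming vectors built with the package's NewVecDense: +//  u := NewVecDense(3, []float64{1, 2, 3}) +//  v := NewVecDense(3, []float64{4, 5, 6}) +//  d := Dot(u, v) // d == 1*4 + 2*5 + 3*6 == 32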
+func Dot(a, b Vector) float64 { + la := a.Len() + lb := b.Len() + if la != lb { + panic(ErrShape) + } + if arv, ok := a.(RawVectorer); ok { + if brv, ok := b.(RawVectorer); ok { + return blas64.Dot(la, arv.RawVector(), brv.RawVector()) + } + } + var sum float64 + for i := 0; i < la; i++ { + sum += a.At(i, 0) * b.At(i, 0) + } + return sum +} + +// Equal returns whether the matrices a and b have the same size +// and are element-wise equal. +func Equal(a, b Matrix) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + if rma, ok := aU.(RawMatrixer); ok { + if rmb, ok := bU.(RawMatrixer); ok { + ra := rma.RawMatrix() + rb := rmb.RawMatrix() + if aTrans == bTrans { + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { + return false + } + } + } + return true + } + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[j*rb.Stride+i] { + return false + } + } + } + return true + } + } + if rma, ok := aU.(RawSymmetricer); ok { + if rmb, ok := bU.(RawSymmetricer); ok { + ra := rma.RawSymmetric() + rb := rmb.RawSymmetric() + // Symmetric matrices are always upper and equal to their transpose. + for i := 0; i < ra.N; i++ { + for j := i; j < ra.N; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { + return false + } + } + } + return true + } + } + if ra, ok := aU.(*VecDense); ok { + if rb, ok := bU.(*VecDense); ok { + // If the raw vectors are the same length they must either both be + // transposed or both not transposed (or have length 1). + for i := 0; i < ra.n; i++ { + if ra.mat.Data[i*ra.mat.Inc] != rb.mat.Data[i*rb.mat.Inc] { + return false + } + } + return true + } + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if a.At(i, j) != b.At(i, j) { + return false + } + } + } + return true +} + +// EqualApprox returns whether the matrices a and b have the same size and contain all equal +// elements with tolerance for element-wise equality specified by epsilon. Matrices +// with non-equal shapes are not equal. +func EqualApprox(a, b Matrix, epsilon float64) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + if rma, ok := aU.(RawMatrixer); ok { + if rmb, ok := bU.(RawMatrixer); ok { + ra := rma.RawMatrix() + rb := rmb.RawMatrix() + if aTrans == bTrans { + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { + return false + } + } + } + return true + } + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[j*rb.Stride+i], epsilon, epsilon) { + return false + } + } + } + return true + } + } + if rma, ok := aU.(RawSymmetricer); ok { + if rmb, ok := bU.(RawSymmetricer); ok { + ra := rma.RawSymmetric() + rb := rmb.RawSymmetric() + // Symmetric matrices are always upper and equal to their transpose. 
+ for i := 0; i < ra.N; i++ { + for j := i; j < ra.N; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { + return false + } + } + } + return true + } + } + if ra, ok := aU.(*VecDense); ok { + if rb, ok := bU.(*VecDense); ok { + // If the raw vectors are the same length they must either both be + // transposed or both not transposed (or have length 1). + for i := 0; i < ra.n; i++ { + if !floats.EqualWithinAbsOrRel(ra.mat.Data[i*ra.mat.Inc], rb.mat.Data[i*rb.mat.Inc], epsilon, epsilon) { + return false + } + } + return true + } + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if !floats.EqualWithinAbsOrRel(a.At(i, j), b.At(i, j), epsilon, epsilon) { + return false + } + } + } + return true +} + +// LogDet returns the log of the determinant and the sign of the determinant +// for the matrix that has been factorized. Numerical stability in product and +// division expressions is generally improved by working in log space. +func LogDet(a Matrix) (det float64, sign float64) { + // TODO(btracey): Add specialized routines for TriDense, etc. + var lu LU + lu.Factorize(a) + return lu.LogDet() +} + +// Max returns the largest element value of the matrix A. +// Max will panic with matrix.ErrShape if the matrix has zero size. +func Max(a Matrix) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + // Max(A) = Max(A^T) + aU, _ := untranspose(a) + switch m := aU.(type) { + case RawMatrixer: + rm := m.RawMatrix() + max := math.Inf(-1) + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + if v > max { + max = v + } + } + } + return max + case RawTriangular: + rm := m.RawTriangular() + // The max of a triangular is at least 0 unless the size is 1. + if rm.N == 1 { + return rm.Data[0] + } + max := 0.0 + if rm.Uplo == blas.Upper { + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v > max { + max = v + } + } + } + return max + } + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { + if v > max { + max = v + } + } + } + return max + case RawSymmetricer: + rm := m.RawSymmetric() + if rm.Uplo != blas.Upper { + panic(badSymTriangle) + } + max := math.Inf(-1) + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v > max { + max = v + } + } + } + return max + default: + r, c := aU.Dims() + max := math.Inf(-1) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := aU.At(i, j) + if v > max { + max = v + } + } + } + return max + } +} + +// Min returns the smallest element value of the matrix A. +// Min will panic with matrix.ErrShape if the matrix has zero size. +func Min(a Matrix) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + // Min(A) = Min(A^T) + aU, _ := untranspose(a) + switch m := aU.(type) { + case RawMatrixer: + rm := m.RawMatrix() + min := math.Inf(1) + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + if v < min { + min = v + } + } + } + return min + case RawTriangular: + rm := m.RawTriangular() + // The min of a triangular is at most 0 unless the size is 1. 
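+ // (Elements outside the stored triangle are implicitly zero and are + // included in the minimum.)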
+ if rm.N == 1 { + return rm.Data[0] + } + min := 0.0 + if rm.Uplo == blas.Upper { + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v < min { + min = v + } + } + } + return min + } + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { + if v < min { + min = v + } + } + } + return min + case RawSymmetricer: + rm := m.RawSymmetric() + if rm.Uplo != blas.Upper { + panic(badSymTriangle) + } + min := math.Inf(1) + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v < min { + min = v + } + } + } + return min + default: + r, c := aU.Dims() + min := math.Inf(1) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := aU.At(i, j) + if v < min { + min = v + } + } + } + return min + } +} + +// Norm returns the specified (induced) norm of the matrix a. See +// https://en.wikipedia.org/wiki/Matrix_norm for the definition of an induced norm. +// +// Valid norms are: +// 1 - The maximum absolute column sum +// 2 - Frobenius norm, the square root of the sum of the squares of the elements. +// Inf - The maximum absolute row sum. +// Norm will panic with ErrNormOrder if an illegal norm order is specified and +// with matrix.ErrShape if the matrix has zero size. +func Norm(a Matrix, norm float64) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + aU, aTrans := untranspose(a) + var work []float64 + switch rma := aU.(type) { + case RawMatrixer: + rm := rma.RawMatrix() + n := normLapack(norm, aTrans) + if n == lapack.MaxColumnSum { + work = getFloats(rm.Cols, false) + defer putFloats(work) + } + return lapack64.Lange(n, rm, work) + case RawTriangular: + rm := rma.RawTriangular() + n := normLapack(norm, aTrans) + if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { + work = getFloats(rm.N, false) + defer putFloats(work) + } + return lapack64.Lantr(n, rm, work) + case RawSymmetricer: + rm := rma.RawSymmetric() + n := normLapack(norm, aTrans) + if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { + work = getFloats(rm.N, false) + defer putFloats(work) + } + return lapack64.Lansy(n, rm, work) + case *VecDense: + rv := rma.RawVector() + switch norm { + default: + panic("unreachable") + case 1: + if aTrans { + imax := blas64.Iamax(rma.n, rv) + return math.Abs(rma.At(imax, 0)) + } + return blas64.Asum(rma.n, rv) + case 2: + return blas64.Nrm2(rma.n, rv) + case math.Inf(1): + if aTrans { + return blas64.Asum(rma.n, rv) + } + imax := blas64.Iamax(rma.n, rv) + return math.Abs(rma.At(imax, 0)) + } + } + switch norm { + default: + panic("unreachable") + case 1: + var max float64 + for j := 0; j < c; j++ { + var sum float64 + for i := 0; i < r; i++ { + sum += math.Abs(a.At(i, j)) + } + if sum > max { + max = sum + } + } + return max + case 2: + var sum float64 + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := a.At(i, j) + sum += v * v + } + } + return math.Sqrt(sum) + case math.Inf(1): + var max float64 + for i := 0; i < r; i++ { + var sum float64 + for j := 0; j < c; j++ { + sum += math.Abs(a.At(i, j)) + } + if sum > max { + max = sum + } + } + return max + } +} + +// normLapack converts the float64 norm input in Norm to a lapack.MatrixNorm. 
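+// For an implicitly transposed input the 1-norm and ∞-norm swap roles, since +// the column sums of A^T are the row sums of A.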
+func normLapack(norm float64, aTrans bool) lapack.MatrixNorm { + switch norm { + case 1: + n := lapack.MaxColumnSum + if aTrans { + n = lapack.MaxRowSum + } + return n + case 2: + return lapack.NormFrob + case math.Inf(1): + n := lapack.MaxRowSum + if aTrans { + n = lapack.MaxColumnSum + } + return n + default: + panic(ErrNormOrder) + } +} + +// Sum returns the sum of the elements of the matrix. +func Sum(a Matrix) float64 { + // TODO(btracey): Add a fast path for the other supported matrix types. + + r, c := a.Dims() + var sum float64 + aU, _ := untranspose(a) + if rma, ok := aU.(RawMatrixer); ok { + rm := rma.RawMatrix() + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + sum += v + } + } + return sum + } + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + sum += a.At(i, j) + } + } + return sum +} + +// Trace returns the trace of the matrix. Trace will panic if the +// matrix is not square. +func Trace(a Matrix) float64 { + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + + aU, _ := untranspose(a) + switch m := aU.(type) { + case RawMatrixer: + rm := m.RawMatrix() + var t float64 + for i := 0; i < r; i++ { + t += rm.Data[i*rm.Stride+i] + } + return t + case RawTriangular: + rm := m.RawTriangular() + var t float64 + for i := 0; i < r; i++ { + t += rm.Data[i*rm.Stride+i] + } + return t + case RawSymmetricer: + rm := m.RawSymmetric() + var t float64 + for i := 0; i < r; i++ { + t += rm.Data[i*rm.Stride+i] + } + return t + default: + var t float64 + for i := 0; i < r; i++ { + t += a.At(i, i) + } + return t + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// use returns a float64 slice with l elements, using f if it +// has the necessary capacity, otherwise creating a new slice. +func use(f []float64, l int) []float64 { + if l <= cap(f) { + return f[:l] + } + return make([]float64, l) +} + +// useZeroed returns a float64 slice with l elements, using f if it +// has the necessary capacity, otherwise creating a new slice. The +// elements of the returned slice are guaranteed to be zero. +func useZeroed(f []float64, l int) []float64 { + if l <= cap(f) { + f = f[:l] + zero(f) + return f + } + return make([]float64, l) +} + +// zero zeros the given slice's elements. +func zero(f []float64) { + for i := range f { + f[i] = 0 + } +} + +// useInt returns an int slice with l elements, using i if it +// has the necessary capacity, otherwise creating a new slice. +func useInt(i []int, l int) []int { + if l <= cap(i) { + return i[:l] + } + return make([]int, l) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/offset.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/offset.go new file mode 100644 index 00000000..af2c03b6 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/offset.go @@ -0,0 +1,20 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!safe + +package mat + +import "unsafe" + +// offset returns the number of float64 values b[0] is after a[0]. +func offset(a, b []float64) int { + if &a[0] == &b[0] { + return 0 + } + // This expression must be atomic with respect to GC moves. + // At this stage this is true, because the GC does not + // move. See https://golang.org/issue/12445. 
+ return int(uintptr(unsafe.Pointer(&b[0]))-uintptr(unsafe.Pointer(&a[0]))) / int(unsafe.Sizeof(float64(0))) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/offset_appengine.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/offset_appengine.go new file mode 100644 index 00000000..df617478 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/offset_appengine.go @@ -0,0 +1,24 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine safe + +package mat + +import "reflect" + +var sizeOfFloat64 = int(reflect.TypeOf(float64(0)).Size()) + +// offset returns the number of float64 values b[0] is after a[0]. +func offset(a, b []float64) int { + va0 := reflect.ValueOf(a).Index(0) + vb0 := reflect.ValueOf(b).Index(0) + if va0.Addr() == vb0.Addr() { + return 0 + } + // This expression must be atomic with respect to GC moves. + // At this stage this is true, because the GC does not + // move. See https://golang.org/issue/12445. + return int(vb0.UnsafeAddr()-va0.UnsafeAddr()) / sizeOfFloat64 +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/pool.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/pool.go new file mode 100644 index 00000000..065fd542 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/pool.go @@ -0,0 +1,236 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var tab64 = [64]byte{ + 0x3f, 0x00, 0x3a, 0x01, 0x3b, 0x2f, 0x35, 0x02, + 0x3c, 0x27, 0x30, 0x1b, 0x36, 0x21, 0x2a, 0x03, + 0x3d, 0x33, 0x25, 0x28, 0x31, 0x12, 0x1c, 0x14, + 0x37, 0x1e, 0x22, 0x0b, 0x2b, 0x0e, 0x16, 0x04, + 0x3e, 0x39, 0x2e, 0x34, 0x26, 0x1a, 0x20, 0x29, + 0x32, 0x24, 0x11, 0x13, 0x1d, 0x0a, 0x0d, 0x15, + 0x38, 0x2d, 0x19, 0x1f, 0x23, 0x10, 0x09, 0x0c, + 0x2c, 0x18, 0x0f, 0x08, 0x17, 0x07, 0x06, 0x05, +} + +// bits returns the ceiling of base 2 log of v. +// Approach based on http://stackoverflow.com/a/11398748. +func bits(v uint64) byte { + if v == 0 { + return 0 + } + v <<= 2 + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v |= v >> 32 + return tab64[((v-(v>>1))*0x07EDD5E59A4E28C2)>>58] - 1 +} + +var ( + // pool contains size stratified workspace Dense pools. + // Each pool element i returns sized matrices with a data + // slice capped at 1<<i. + if !m.IsZero() { + if fr != r { + panic(ErrShape) + } + if _, lc := factors[len(factors)-1].Dims(); lc != c { + panic(ErrShape) + } + } + + dims := make([]int, len(factors)+1) + dims[0] = r + dims[len(dims)-1] = c + pc := fc + for i, f := range factors[1:] { + cr, cc := f.Dims() + dims[i+1] = cr + if pc != cr { + panic(ErrShape) + } + pc = cc + } + + return &multiplier{ + factors: factors, + dims: dims, + table: newTable(len(factors)), + } +} + +// optimize determines an optimal matrix multiply operation order.
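+// +// As a worked illustration, for dims = [10, 100, 5, 50] the order (A*B)*C +// costs 10*100*5 + 10*5*50 = 7500 scalar multiplications while A*(B*C) costs +// 100*5*50 + 10*100*50 = 75000, so the table records the former split.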
+func (p *multiplier) optimize() { + if debugProductWalk { + fmt.Printf("chain dims: %v\n", p.dims) + } + const maxInt = int(^uint(0) >> 1) + for f := 1; f < len(p.factors); f++ { + for i := 0; i < len(p.factors)-f; i++ { + j := i + f + p.table.set(i, j, entry{cost: maxInt}) + for k := i; k < j; k++ { + cost := p.table.at(i, k).cost + p.table.at(k+1, j).cost + p.dims[i]*p.dims[k+1]*p.dims[j+1] + if cost < p.table.at(i, j).cost { + p.table.set(i, j, entry{cost: cost, k: k}) + } + } + } + } +} + +// multiply walks the optimal operation tree found by optimize, +// leaving the final result in the stack. It returns the +// product, which may be copied but should be returned to +// the workspace pool. +func (p *multiplier) multiply() *Dense { + result, _ := p.multiplySubchain(0, len(p.factors)-1) + if debugProductWalk { + r, c := result.Dims() + fmt.Printf("\tpop result (%d×%d) cost=%d\n", r, c, p.table.at(0, len(p.factors)-1).cost) + } + return result.(*Dense) +} + +func (p *multiplier) multiplySubchain(i, j int) (m Matrix, intermediate bool) { + if i == j { + return p.factors[i], false + } + + a, aTmp := p.multiplySubchain(i, p.table.at(i, j).k) + b, bTmp := p.multiplySubchain(p.table.at(i, j).k+1, j) + + ar, ac := a.Dims() + br, bc := b.Dims() + if ac != br { + // Panic with a string since this + // is not a user-facing panic. + panic(ErrShape.Error()) + } + + if debugProductWalk { + fmt.Printf("\tpush f[%d] (%d×%d)%s * f[%d] (%d×%d)%s\n", + i, ar, ac, result(aTmp), j, br, bc, result(bTmp)) + } + + r := getWorkspace(ar, bc, false) + r.Mul(a, b) + if aTmp { + putWorkspace(a.(*Dense)) + } + if bTmp { + putWorkspace(b.(*Dense)) + } + return r, true +} + +type entry struct { + k int // is the chain subdivision index. + cost int // cost is the cost of the operation. +} + +// table is a row major n×n dynamic programming table. +type table struct { + n int + entries []entry +} + +func newTable(n int) table { + return table{n: n, entries: make([]entry, n*n)} +} + +func (t table) at(i, j int) entry { return t.entries[i*t.n+j] } +func (t table) set(i, j int, e entry) { t.entries[i*t.n+j] = e } + +type result bool + +func (r result) String() string { + if r { + return " (popped result)" + } + return "" +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/qr.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/qr.go new file mode 100644 index 00000000..c56845c7 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/qr.go @@ -0,0 +1,233 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// QR is a type for creating and using the QR factorization of a matrix. +type QR struct { + qr *Dense + tau []float64 + cond float64 +} + +func (qr *QR) updateCond(norm lapack.MatrixNorm) { + // Since A = Q*R, and Q is orthogonal, we get for the condition number κ + // κ(A) := |A| |A^-1| = |Q*R| |(Q*R)^-1| = |R| |R^-1 * Q^T| + // = |R| |R^-1| = κ(R), + // where we used that fact that Q^-1 = Q^T. However, this assumes that + // the matrix norm is invariant under orthogonal transformations which + // is not the case for CondNorm. Hopefully the error is negligible: κ + // is only a qualitative measure anyway. 
+ n := qr.qr.mat.Cols + work := getFloats(3*n, false) + iwork := getInts(n, false) + r := qr.qr.asTriDense(n, blas.NonUnit, blas.Upper) + v := lapack64.Trcon(norm, r.mat, work, iwork) + putFloats(work) + putInts(iwork) + qr.cond = 1 / v +} + +// Factorize computes the QR factorization of an m×n matrix a where m >= n. The QR +// factorization always exists even if A is singular. +// +// The QR decomposition is a factorization of the matrix A such that A = Q * R. +// The matrix Q is an orthonormal m×m matrix, and R is an m×n upper triangular matrix. +// Q and R can be extracted using the QTo and RTo methods. +func (qr *QR) Factorize(a Matrix) { + qr.factorize(a, CondNorm) +} + +func (qr *QR) factorize(a Matrix, norm lapack.MatrixNorm) { + m, n := a.Dims() + if m < n { + panic(ErrShape) + } + k := min(m, n) + if qr.qr == nil { + qr.qr = &Dense{} + } + qr.qr.Clone(a) + work := []float64{0} + qr.tau = make([]float64, k) + lapack64.Geqrf(qr.qr.mat, qr.tau, work, -1) + + work = getFloats(int(work[0]), false) + lapack64.Geqrf(qr.qr.mat, qr.tau, work, len(work)) + putFloats(work) + qr.updateCond(norm) +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a successful factorization. +func (qr *QR) Cond() float64 { + if qr.qr == nil || qr.qr.IsZero() { + panic("qr: no decomposition computed") + } + return qr.cond +} + +// TODO(btracey): Add in the "Reduced" forms for extracting the n×n orthogonal +// and upper triangular matrices. + +// RTo extracts the m×n upper trapezoidal matrix from a QR decomposition. +// If dst is nil, a new matrix is allocated. The resulting dst matrix is returned. +func (qr *QR) RTo(dst *Dense) *Dense { + r, c := qr.qr.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + // Disguise the QR as an upper triangular + t := &TriDense{ + mat: blas64.Triangular{ + N: c, + Stride: qr.qr.mat.Stride, + Data: qr.qr.mat.Data, + Uplo: blas.Upper, + Diag: blas.NonUnit, + }, + cap: qr.qr.capCols, + } + dst.Copy(t) + + // Zero below the triangular. + for i := r; i < c; i++ { + zero(dst.mat.Data[i*dst.mat.Stride : i*dst.mat.Stride+c]) + } + + return dst +} + +// QTo extracts the m×m orthonormal matrix Q from a QR decomposition. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +func (qr *QR) QTo(dst *Dense) *Dense { + r, _ := qr.qr.Dims() + if dst == nil { + dst = NewDense(r, r, nil) + } else { + dst.reuseAsZeroed(r, r) + } + + // Set Q = I. + for i := 0; i < r*r; i += r + 1 { + dst.mat.Data[i] = 1 + } + + // Construct Q from the elementary reflectors. + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, len(work)) + putFloats(work) + + return dst +} + +// Solve finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and b, where A is an m×n matrix represented in its QR factorized +// form. If A is singular or near-singular a Condition error is returned. +// See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters. +// If trans == false, find X such that ||A*X - B||_2 is minimized. +// If trans == true, find the minimum norm solution of A^T * X = B. +// The solution matrix, X, is stored in place into m. 
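+// +// A minimal least-squares sketch, assuming a tall Dense a and an m×1 Dense b: +//  var qr QR +//  qr.Factorize(a) +//  var x Dense +//  err := qr.Solve(&x, false, b) // x minimizes ||a*x - b||_2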
+func (qr *QR) Solve(x *Dense, trans bool, b Matrix) error { + r, c := qr.qr.Dims() + br, bc := b.Dims() + + // The QR solve algorithm stores the result in-place into the right hand side. + // The storage for the answer must be large enough to hold both b and x. + // However, this method's receiver must be the size of x. Copy b, and then + // copy the result into m at the end. + if trans { + if c != br { + panic(ErrShape) + } + x.reuseAs(r, bc) + } else { + if r != br { + panic(ErrShape) + } + x.reuseAs(c, bc) + } + // Do not need to worry about overlap between m and b because x has its own + // independent storage. + w := getWorkspace(max(r, c), bc, false) + w.Copy(b) + t := qr.qr.asTriDense(qr.qr.mat.Cols, blas.NonUnit, blas.Upper).mat + if trans { + ok := lapack64.Trtrs(blas.Trans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + for i := c; i < r; i++ { + zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) + } + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, len(work)) + putFloats(work) + } else { + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, len(work)) + putFloats(work) + + ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + } + // X was set above to be the correct size for the result. + x.Copy(w) + putWorkspace(w) + if qr.cond > ConditionTolerance { + return Condition(qr.cond) + } + return nil +} + +// SolveVec finds a minimum-norm solution to a system of linear equations, +// Ax = b. +// See QR.Solve for the full documentation. +func (qr *QR) SolveVec(x *VecDense, trans bool, b Vector) error { + r, c := qr.qr.Dims() + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. + bm := Matrix(b) + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if x != b { + x.checkOverlap(bmat) + } + b := VecDense{mat: bmat, n: b.Len()} + bm = b.asDense() + } + if trans { + x.reuseAs(r) + } else { + x.reuseAs(c) + } + return qr.Solve(x.asDense(), trans, bm) + +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/shadow.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/shadow.go new file mode 100644 index 00000000..cc62e44f --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/shadow.go @@ -0,0 +1,226 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" +) + +const ( + // regionOverlap is the panic string used for the general case + // of a matrix region overlap between a source and destination. + regionOverlap = "mat: bad region: overlap" + + // regionIdentity is the panic string used for the specific + // case of complete agreement between a source and a destination. + regionIdentity = "mat: bad region: identical" + + // mismatchedStrides is the panic string used for overlapping + // data slices with differing strides. 
+ mismatchedStrides = "mat: bad region: different strides" +) + +// checkOverlap returns false if the receiver does not overlap data elements +// referenced by the parameter and panics otherwise. +// +// checkOverlap methods return a boolean to allow the check call to be added to a +// boolean expression, making use of short-circuit operators. +func checkOverlap(a, b blas64.General) bool { + if cap(a.Data) == 0 || cap(b.Data) == 0 { + return false + } + + off := offset(a.Data[:1], b.Data[:1]) + + if off == 0 { + // At least one element overlaps. + if a.Cols == b.Cols && a.Rows == b.Rows && a.Stride == b.Stride { + panic(regionIdentity) + } + panic(regionOverlap) + } + + if off > 0 && len(a.Data) <= off { + // We know a is completely before b. + return false + } + if off < 0 && len(b.Data) <= -off { + // We know a is completely after b. + return false + } + + if a.Stride != b.Stride { + // Too hard, so assume the worst. + panic(mismatchedStrides) + } + + if off < 0 { + off = -off + a.Cols, b.Cols = b.Cols, a.Cols + } + if rectanglesOverlap(off, a.Cols, b.Cols, a.Stride) { + panic(regionOverlap) + } + return false +} + +func (m *Dense) checkOverlap(a blas64.General) bool { + return checkOverlap(m.RawMatrix(), a) +} + +func (m *Dense) checkOverlapMatrix(a Matrix) bool { + if m == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return m.checkOverlap(amat) +} + +func (s *SymDense) checkOverlap(a blas64.General) bool { + return checkOverlap(generalFromSymmetric(s.RawSymmetric()), a) +} + +func (s *SymDense) checkOverlapMatrix(a Matrix) bool { + if s == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return s.checkOverlap(amat) +} + +// generalFromSymmetric returns a blas64.General with the backing +// data and dimensions of a. +func generalFromSymmetric(a blas64.Symmetric) blas64.General { + return blas64.General{ + Rows: a.N, + Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } +} + +func (t *TriDense) checkOverlap(a blas64.General) bool { + return checkOverlap(generalFromTriangular(t.RawTriangular()), a) +} + +func (t *TriDense) checkOverlapMatrix(a Matrix) bool { + if t == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return t.checkOverlap(amat) +} + +// generalFromTriangular returns a blas64.General with the backing +// data and dimensions of a. +func generalFromTriangular(a blas64.Triangular) blas64.General { + return blas64.General{ + Rows: a.N, + Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } +} + +func (v *VecDense) checkOverlap(a blas64.Vector) bool { + mat := v.mat + if cap(mat.Data) == 0 || cap(a.Data) == 0 { + return false + } + + off := offset(mat.Data[:1], a.Data[:1]) + + if off == 0 { + // At least one element overlaps. 
+ if mat.Inc == a.Inc && len(mat.Data) == len(a.Data) { + panic(regionIdentity) + } + panic(regionOverlap) + } + + if off > 0 && len(mat.Data) <= off { + // We know v is completely before a. + return false + } + if off < 0 && len(a.Data) <= -off { + // We know v is completely after a. + return false + } + + if mat.Inc != a.Inc { + // Too hard, so assume the worst. + panic(mismatchedStrides) + } + + if mat.Inc == 1 || off&mat.Inc == 0 { + panic(regionOverlap) + } + return false +} + +// rectanglesOverlap returns whether the strided rectangles a and b overlap +// when b is offset by off elements after a but has at least one element before +// the end of a. off must be positive. a and b have aCols and bCols respectively. +// +// rectanglesOverlap works by shifting both matrices left such that the left +// column of a is at 0. The column indexes are flattened by obtaining the shifted +// relative left and right column positions modulo the common stride. This allows +// direct comparison of the column offsets when the matrix backing data slices +// are known to overlap. +func rectanglesOverlap(off, aCols, bCols, stride int) bool { + if stride == 1 { + // Unit stride means overlapping data + // slices must overlap as matrices. + return true + } + + // Flatten the shifted matrix column positions + // so a starts at 0, modulo the common stride. + aTo := aCols + // The mod stride operations here make the from + // and to indexes comparable between a and b when + // the data slices of a and b overlap. + bFrom := off % stride + bTo := (bFrom + bCols) % stride + + if bTo == 0 || bFrom < bTo { + // b matrix is not wrapped: compare for + // simple overlap. + return bFrom < aTo + } + + // b strictly wraps and so must overlap with a. + return true +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/solve.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/solve.go new file mode 100644 index 00000000..cbf0ed9d --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/solve.go @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Solve finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and B. If A is singular or near-singular, a Condition error +// is returned. See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters: +// - if m >= n, find X such that ||A*X - B||_2 is minimized, +// - if m < n, find the minimum norm solution of A * X = B. +// The solution matrix, X, is stored in-place into the receiver. +func (m *Dense) Solve(a, b Matrix) error { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br { + panic(ErrShape) + } + m.reuseAs(ac, bc) + + // TODO(btracey): Add special cases for SymDense, etc. 
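+ + // Dispatch sketch for the cases below: a triangular A is solved directly + // with Trsm, a square A via LU, a tall A via QR least squares, and a wide + // A via LQ for the minimum-norm solution.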
+ aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + switch rma := aU.(type) { + case RawTriangular: + side := blas.Left + tA := blas.NoTrans + if aTrans { + tA = blas.Trans + } + + switch rm := bU.(type) { + case RawMatrixer: + if m != bU || bTrans { + if m == bU || m.checkOverlap(rm.RawMatrix()) { + tmp := getWorkspace(br, bc, false) + tmp.Copy(b) + m.Copy(tmp) + putWorkspace(tmp) + break + } + m.Copy(b) + } + default: + if m != bU { + m.Copy(b) + } else if bTrans { + // m and b share data so Copy cannot be used directly. + tmp := getWorkspace(br, bc, false) + tmp.Copy(b) + m.Copy(tmp) + putWorkspace(tmp) + } + } + + rm := rma.RawTriangular() + blas64.Trsm(side, tA, 1, rm, m.mat) + work := getFloats(3*rm.N, false) + iwork := getInts(rm.N, false) + cond := lapack64.Trcon(CondNorm, rm, work, iwork) + putFloats(work) + putInts(iwork) + if cond > ConditionTolerance { + return Condition(cond) + } + return nil + } + + switch { + case ar == ac: + if a == b { + // x = I. + if ar == 1 { + m.mat.Data[0] = 1 + return nil + } + for i := 0; i < ar; i++ { + v := m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+ac] + zero(v) + v[i] = 1 + } + return nil + } + var lu LU + lu.Factorize(a) + return lu.Solve(m, false, b) + case ar > ac: + var qr QR + qr.Factorize(a) + return qr.Solve(m, false, b) + default: + var lq LQ + lq.Factorize(a) + return lq.Solve(m, false, b) + } +} + +// SolveVec finds a minimum-norm solution to a system of linear equations defined +// by the matrix a and the right-hand side column vector b. If A is singular or +// near-singular, a Condition error is returned. See the documentation for +// Dense.Solve for more information. +func (v *VecDense) SolveVec(a Matrix, b Vector) error { + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + _, c := a.Dims() + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. + + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + v.reuseAs(c) + m := v.asDense() + // We conditionally create bm as m when b and v are identical + // to prevent the overlap detection code from identifying m + // and bm as overlapping but not identical. + bm := m + if v != b { + b := VecDense{mat: bmat, n: b.Len()} + bm = b.asDense() + } + return m.Solve(a, bm) + } + + v.reuseAs(c) + m := v.asDense() + return m.Solve(a, b) +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/svd.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/svd.go new file mode 100644 index 00000000..ef1f21cf --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/svd.go @@ -0,0 +1,190 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// SVD is a type for creating and using the Singular Value Decomposition (SVD) +// of a matrix. +type SVD struct { + kind SVDKind + + s []float64 + u blas64.General + vt blas64.General +} + +// Factorize computes the singular value decomposition (SVD) of the input matrix +// A. The singular values of A are computed in all cases, while the singular +// vectors are optionally computed depending on the input kind. 
+// +// The full singular value decomposition (kind == SVDFull) deconstructs A as +// A = U * Σ * V^T +// where Σ is an m×n diagonal matrix of singular values, U is an m×m unitary +// matrix of left singular vectors, and V is an n×n matrix of right singular vectors. +// +// It is frequently not necessary to compute the full SVD. Computation time and +// storage costs can be reduced using the appropriate kind. Only the singular +// values can be computed (kind == SVDNone), or a "thin" representation of the +// singular vectors (kind == SVDThin). The thin representation can save a significant +// amount of memory if m >> n. See the documentation for the lapack.SVDKind values +// for more information. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. +func (svd *SVD) Factorize(a Matrix, kind SVDKind) (ok bool) { + m, n := a.Dims() + var jobU, jobVT lapack.SVDJob + switch kind { + default: + panic("svd: bad input kind") + case SVDNone: + jobU = lapack.SVDNone + jobVT = lapack.SVDNone + case SVDFull: + // TODO(btracey): This code should be modified to have the smaller + // matrix written in-place into aCopy when the lapack/native/dgesvd + // implementation is complete. + svd.u = blas64.General{ + Rows: m, + Cols: m, + Stride: m, + Data: use(svd.u.Data, m*m), + } + svd.vt = blas64.General{ + Rows: n, + Cols: n, + Stride: n, + Data: use(svd.vt.Data, n*n), + } + jobU = lapack.SVDAll + jobVT = lapack.SVDAll + case SVDThin: + // TODO(btracey): This code should be modified to have the larger + // matrix written in-place into aCopy when the lapack/native/dgesvd + // implementation is complete. + svd.u = blas64.General{ + Rows: m, + Cols: min(m, n), + Stride: min(m, n), + Data: use(svd.u.Data, m*min(m, n)), + } + svd.vt = blas64.General{ + Rows: min(m, n), + Cols: n, + Stride: n, + Data: use(svd.vt.Data, min(m, n)*n), + } + jobU = lapack.SVDInPlace + jobVT = lapack.SVDInPlace + } + + // A is destroyed on call, so copy the matrix. + aCopy := DenseCopyOf(a) + svd.kind = kind + svd.s = use(svd.s, min(m, n)) + + work := []float64{0} + lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, -1) + work = getFloats(int(work[0]), false) + ok = lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, len(work)) + putFloats(work) + if !ok { + svd.kind = 0 + } + return ok +} + +// Kind returns the SVDKind of the decomposition. If no decomposition has been +// computed, Kind returns 0. +func (svd *SVD) Kind() SVDKind { + return svd.kind +} + +// Cond returns the 2-norm condition number for the factorized matrix. Cond will +// panic if the receiver does not contain a successful factorization. +func (svd *SVD) Cond() float64 { + if svd.kind == 0 { + panic("svd: no decomposition computed") + } + return svd.s[0] / svd.s[len(svd.s)-1] +} + +// Values returns the singular values of the factorized matrix in decreasing order. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(m,n), and Values will panic with +// ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// Values will panic if the receiver does not contain a successful factorization.
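+// +// A short sketch of typical use: +//  var svd SVD +//  if ok := svd.Factorize(a, SVDThin); !ok { /* handle failed factorization */ } +//  vals := svd.Values(nil) // singular values in decreasing order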
+func (svd *SVD) Values(s []float64) []float64 { + if svd.kind == 0 { + panic("svd: no decomposition computed") + } + if s == nil { + s = make([]float64, len(svd.s)) + } + if len(s) != len(svd.s) { + panic(ErrSliceLengthMismatch) + } + copy(s, svd.s) + return s +} + +// UTo extracts the matrix U from the singular value decomposition, storing +// the result in-place into dst. U is size m×m if svd.Kind() == SVDFull, +// of size m×min(m,n) if svd.Kind() == SVDThin, and UTo panics otherwise. +func (svd *SVD) UTo(dst *Dense) *Dense { + kind := svd.kind + if kind != SVDFull && kind != SVDThin { + panic("mat: improper SVD kind") + } + r := svd.u.Rows + c := svd.u.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: svd.u, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + + return dst +} + +// VTo extracts the matrix V from the singular value decomposition, storing +// the result in-place into dst. V is size n×n if svd.Kind() == SVDFull, +// of size n×min(m,n) if svd.Kind() == SVDThin, and VTo panics otherwise. +func (svd *SVD) VTo(dst *Dense) *Dense { + kind := svd.kind + if kind != SVDFull && kind != SVDThin { + panic("mat: improper SVD kind") + } + r := svd.vt.Rows + c := svd.vt.Cols + if dst == nil { + dst = NewDense(c, r, nil) + } else { + dst.reuseAs(c, r) + } + + tmp := &Dense{ + mat: svd.vt, + capRows: r, + capCols: c, + } + dst.Copy(tmp.T()) + + return dst +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/symband.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/symband.go new file mode 100644 index 00000000..967c5ff3 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/symband.go @@ -0,0 +1,175 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + symBandDense *SymBandDense + _ Matrix = symBandDense + _ Symmetric = symBandDense + _ Banded = symBandDense + _ RawSymBander = symBandDense + _ MutableSymBanded = symBandDense + + _ NonZeroDoer = symBandDense + _ RowNonZeroDoer = symBandDense + _ ColNonZeroDoer = symBandDense +) + +// SymBandDense represents a symmetric band matrix in dense storage format. +type SymBandDense struct { + mat blas64.SymmetricBand +} + +// MutableSymBanded is a symmetric band matrix interface type that allows elements +// to be altered. +type MutableSymBanded interface { + Symmetric + Bandwidth() (kl, ku int) + SetSymBand(i, j int, v float64) +} + +// A RawSymBander can return a blas64.SymmetricBand representation of the receiver. +// Changes to the blas64.SymmetricBand.Data slice will be reflected in the original +// matrix, changes to the N, K, Stride and Uplo fields will not. +type RawSymBander interface { + RawSymBand() blas64.SymmetricBand +} + +// NewSymBandDense creates a new SymBand matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*(k+1), +// data is used as the backing slice, and changes to the elements of the returned +// SymBandDense will be reflected in data. If neither of these is true, NewSymBandDense +// will panic. k must be at least zero and less than n, otherwise NewBandDense will panic. +// +// The data must be arranged in row-major order constructed by removing the zeros +// from the rows outside the band and aligning the diagonals. 
SymBandDense matrices +// are stored in the upper triangle. For example, the matrix +// 1 2 3 0 0 0 +// 2 4 5 6 0 0 +// 3 5 7 8 9 0 +// 0 6 8 10 11 12 +// 0 0 9 11 13 14 +// 0 0 0 12 14 15 +// becomes (* entries are never accessed) +// 1 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 * +// 15 * * +// which is passed to NewBandDense as []float64{1, 2, 3, 4, ...} with k=2. +// Only the values in the band portion of the matrix are used. +func NewSymBandDense(n, k int, data []float64) *SymBandDense { + if n < 0 || k < 0 { + panic("mat: negative dimension") + } + if k+1 > n { + panic("mat: band out of range") + } + bc := k + 1 + if data != nil && len(data) != n*bc { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*bc) + } + return &SymBandDense{ + mat: blas64.SymmetricBand{ + N: n, + K: k, + Stride: bc, + Uplo: blas.Upper, + Data: data, + }, + } +} + +// NewDiagonal is a convenience function that returns a diagonal matrix represented by a +// SymBandDense. The length of data must be n or data must be nil, otherwise NewDiagonal +// will panic. +func NewDiagonal(n int, data []float64) *SymBandDense { + return NewSymBandDense(n, 0, data) +} + +// Dims returns the number of rows and columns in the matrix. +func (s *SymBandDense) Dims() (r, c int) { + return s.mat.N, s.mat.N +} + +// Symmetric returns the size of the receiver. +func (s *SymBandDense) Symmetric() int { + return s.mat.N +} + +// Bandwidth returns the bandwidths of the matrix. +func (s *SymBandDense) Bandwidth() (kl, ku int) { + return s.mat.K, s.mat.K +} + +// T implements the Matrix interface. Symmetric matrices, by definition, are +// equal to their transpose, and this is a no-op. +func (s *SymBandDense) T() Matrix { + return s +} + +// TBand implements the Banded interface. +func (s *SymBandDense) TBand() Banded { + return s +} + +// RawSymBand returns the underlying blas64.SymBand used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.SymBand. +func (s *SymBandDense) RawSymBand() blas64.SymmetricBand { + return s.mat +} + +// DoNonZero calls the function fn for each of the non-zero elements of s. The function fn +// takes a row/column index and the element value of s at (i, j). +func (s *SymBandDense) DoNonZero(fn func(i, j int, v float64)) { + for i := 0; i < s.mat.N; i++ { + for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of s. The function fn +// takes a row/column index and the element value of s at (i, j). +func (s *SymBandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || s.mat.N <= i { + panic(ErrRowAccess) + } + for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of s. The function fn +// takes a row/column index and the element value of s at (i, j). 
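+// +// For example, summing the non-zero entries of column j: +//  var sum float64 +//  s.DoColNonZero(j, func(i, j int, v float64) { sum += v })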
+func (s *SymBandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || s.mat.N <= j { + panic(ErrColAccess) + } + for i := 0; i < s.mat.N; i++ { + if i-s.mat.K <= j && j < i+s.mat.K+1 { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/symmetric.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/symmetric.go new file mode 100644 index 00000000..d3ac2617 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/symmetric.go @@ -0,0 +1,568 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + symDense *SymDense + + _ Matrix = symDense + _ Symmetric = symDense + _ RawSymmetricer = symDense + _ MutableSymmetric = symDense +) + +const ( + badSymTriangle = "mat: blas64.Symmetric not upper" + badSymCap = "mat: bad capacity for SymDense" +) + +// SymDense is a symmetric matrix that uses dense storage. SymDense +// matrices are stored in the upper triangle. +type SymDense struct { + mat blas64.Symmetric + cap int +} + +// Symmetric represents a symmetric matrix (where the element at {i, j} equals +// the element at {j, i}). Symmetric matrices are always square. +type Symmetric interface { + Matrix + // Symmetric returns the number of rows/columns in the matrix. + Symmetric() int +} + +// A RawSymmetricer can return a view of itself as a BLAS Symmetric matrix. +type RawSymmetricer interface { + RawSymmetric() blas64.Symmetric +} + +// A MutableSymmetric can set elements of a symmetric matrix. +type MutableSymmetric interface { + Symmetric + SetSym(i, j int, v float64) +} + +// NewSymDense creates a new Symmetric matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*n, data is +// used as the backing slice, and changes to the elements of the returned SymDense +// will be reflected in data. If neither of these is true, NewSymDense will panic. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +// Only the values in the upper triangular portion of the matrix are used. +func NewSymDense(n int, data []float64) *SymDense { + if n < 0 { + panic("mat: negative dimension") + } + if data != nil && n*n != len(data) { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*n) + } + return &SymDense{ + mat: blas64.Symmetric{ + N: n, + Stride: n, + Data: data, + Uplo: blas.Upper, + }, + cap: n, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (s *SymDense) Dims() (r, c int) { + return s.mat.N, s.mat.N +} + +// Caps returns the number of rows and columns in the backing matrix. +func (s *SymDense) Caps() (r, c int) { + return s.cap, s.cap +} + +// T implements the Matrix interface. Symmetric matrices, by definition, are +// equal to their transpose, and this is a no-op. +func (s *SymDense) T() Matrix { + return s +} + +func (s *SymDense) Symmetric() int { + return s.mat.N +} + +// RawSymmetric returns the matrix as a blas64.Symmetric. The returned +// value must be stored in upper triangular format. +func (s *SymDense) RawSymmetric() blas64.Symmetric { + return s.mat +} + +// SetRawSymmetric sets the underlying blas64.Symmetric used by the receiver. 
+// Changes to elements in the receiver following the call will be reflected +// in b. SetRawSymmetric will panic if b is not an upper-encoded symmetric +// matrix. +func (s *SymDense) SetRawSymmetric(b blas64.Symmetric) { + if b.Uplo != blas.Upper { + panic(badSymTriangle) + } + s.mat = b +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (s *SymDense) Reset() { + // N and Stride must be zeroed in unison. + s.mat.N, s.mat.Stride = 0, 0 + s.mat.Data = s.mat.Data[:0] +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. SymDense matrices can be zeroed using Reset. +func (s *SymDense) IsZero() bool { + // It must be the case that s.Dims() returns + // zeros in this case. See comment in Reset(). + return s.mat.N == 0 +} + +// reuseAs resizes an empty matrix to an n×n matrix, +// or checks that a non-empty matrix is n×n. +func (s *SymDense) reuseAs(n int) { + if n == 0 { + panic(ErrZeroLength) + } + if s.mat.N > s.cap { + panic(badSymCap) + } + if s.IsZero() { + s.mat = blas64.Symmetric{ + N: n, + Stride: n, + Data: use(s.mat.Data, n*n), + Uplo: blas.Upper, + } + s.cap = n + return + } + if s.mat.Uplo != blas.Upper { + panic(badSymTriangle) + } + if s.mat.N != n { + panic(ErrShape) + } +} + +func (s *SymDense) isolatedWorkspace(a Symmetric) (w *SymDense, restore func()) { + n := a.Symmetric() + if n == 0 { + panic(ErrZeroLength) + } + w = getWorkspaceSym(n, false) + return w, func() { + s.CopySym(w) + putWorkspaceSym(w) + } +} + +func (s *SymDense) AddSym(a, b Symmetric) { + n := a.Symmetric() + if n != b.Symmetric() { + panic(ErrShape) + } + s.reuseAs(n) + + if a, ok := a.(RawSymmetricer); ok { + if b, ok := b.(RawSymmetricer); ok { + amat, bmat := a.RawSymmetric(), b.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(amat)) + } + if s != b { + s.checkOverlap(generalFromSymmetric(bmat)) + } + for i := 0; i < n; i++ { + btmp := bmat.Data[i*bmat.Stride+i : i*bmat.Stride+n] + stmp := s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+n] + for j, v := range amat.Data[i*amat.Stride+i : i*amat.Stride+n] { + stmp[j] = v + btmp[j] + } + } + return + } + } + + s.checkOverlapMatrix(a) + s.checkOverlapMatrix(b) + for i := 0; i < n; i++ { + stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + for j := i; j < n; j++ { + stmp[j] = a.At(i, j) + b.At(i, j) + } + } +} + +func (s *SymDense) CopySym(a Symmetric) int { + n := a.Symmetric() + n = min(n, s.mat.N) + if n == 0 { + return 0 + } + switch a := a.(type) { + case RawSymmetricer: + amat := a.RawSymmetric() + if amat.Uplo != blas.Upper { + panic(badSymTriangle) + } + for i := 0; i < n; i++ { + copy(s.mat.Data[i*s.mat.Stride+i:i*s.mat.Stride+n], amat.Data[i*amat.Stride+i:i*amat.Stride+n]) + } + default: + for i := 0; i < n; i++ { + stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + for j := i; j < n; j++ { + stmp[j] = a.At(i, j) + } + } + } + return n +} + +// SymRankOne performs a symmetric rank-one update to the matrix a and stores +// the result in the receiver: +// s = a + alpha * x * x' +func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x Vector) { + n, c := x.Dims() + if a.Symmetric() != n || c != 1 { + panic(ErrShape) + } + s.reuseAs(n) + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.CopySym(a) + } + + xU, _ := untranspose(x) + if
rv, ok := xU.(RawVectorer); ok { + xmat := rv.RawVector() + s.checkOverlap((&VecDense{mat: xmat, n: n}).asGeneral()) + blas64.Syr(alpha, xmat, s.mat) + return + } + + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.set(i, j, s.at(i, j)+alpha*x.AtVec(i)*x.AtVec(j)) + } + } +} + +// SymRankK performs a symmetric rank-k update to the matrix a and stores the +// result into the receiver. If a is zero-sized, see SymOuterK. +// s = a + alpha * x * x' +func (s *SymDense) SymRankK(a Symmetric, alpha float64, x Matrix) { + n := a.Symmetric() + r, _ := x.Dims() + if r != n { + panic(ErrShape) + } + xMat, aTrans := untranspose(x) + var g blas64.General + if rm, ok := xMat.(RawMatrixer); ok { + g = rm.RawMatrix() + } else { + g = DenseCopyOf(x).mat + aTrans = false + } + if a != s { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.reuseAs(n) + s.CopySym(a) + } + t := blas.NoTrans + if aTrans { + t = blas.Trans + } + blas64.Syrk(t, alpha, g, 1, s.mat) +} + +// SymOuterK calculates the outer product of x with itself and stores +// the result into the receiver. It is equivalent to the matrix +// multiplication +// s = alpha * x * x'. +// In order to update an existing matrix, see SymRankK. +func (s *SymDense) SymOuterK(alpha float64, x Matrix) { + n, _ := x.Dims() + switch { + case s.IsZero(): + s.mat = blas64.Symmetric{ + N: n, + Stride: n, + Data: useZeroed(s.mat.Data, n*n), + Uplo: blas.Upper, + } + s.cap = n + s.SymRankK(s, alpha, x) + case s.mat.Uplo != blas.Upper: + panic(badSymTriangle) + case s.mat.N == n: + if s == x { + w := getWorkspaceSym(n, true) + w.SymRankK(w, alpha, x) + s.CopySym(w) + putWorkspaceSym(w) + } else { + switch r := x.(type) { + case RawMatrixer: + s.checkOverlap(r.RawMatrix()) + case RawSymmetricer: + s.checkOverlap(generalFromSymmetric(r.RawSymmetric())) + case RawTriangular: + s.checkOverlap(generalFromTriangular(r.RawTriangular())) + } + // Only zero the upper triangle.
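+ // The matrix is stored in the upper triangle (Uplo == blas.Upper), so + // the strictly lower triangle is never read and does not need to be + // cleared before the rank-k accumulation below.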
+ for i := 0; i < n; i++ { + ri := i * s.mat.Stride + zero(s.mat.Data[ri+i : ri+n]) + } + s.SymRankK(s, alpha, x) + } + default: + panic(ErrShape) + } +} + +// RankTwo performs a symmetric rank-two update to the matrix a and stores +// the result in the receiver +// s = a + alpha * (x * y' + y * x') +func (s *SymDense) RankTwo(a Symmetric, alpha float64, x, y Vector) { + n := s.mat.N + xr, xc := x.Dims() + if xr != n || xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yr != n || yc != 1 { + panic(ErrShape) + } + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + s.checkOverlap((&VecDense{mat: xmat, n: x.Len()}).asGeneral()) + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + s.checkOverlap((&VecDense{mat: ymat, n: y.Len()}).asGeneral()) + } else { + fast = false + } + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.reuseAs(n) + s.CopySym(a) + } + + if fast { + if s != a { + s.reuseAs(n) + s.CopySym(a) + } + blas64.Syr2(alpha, xmat, ymat, s.mat) + return + } + + for i := 0; i < n; i++ { + s.reuseAs(n) + for j := i; j < n; j++ { + s.set(i, j, a.At(i, j)+alpha*(x.AtVec(i)*y.AtVec(j)+y.AtVec(i)*x.AtVec(j))) + } + } +} + +// ScaleSym multiplies the elements of a by f, placing the result in the receiver. +func (s *SymDense) ScaleSym(f float64, a Symmetric) { + n := a.Symmetric() + s.reuseAs(n) + if a, ok := a.(RawSymmetricer); ok { + amat := a.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(amat)) + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = f * amat.Data[i*amat.Stride+j] + } + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = f * a.At(i, j) + } + } +} + +// SubsetSym extracts a subset of the rows and columns of the matrix a and stores +// the result in-place into the receiver. The resulting matrix size is +// len(set)×len(set). Specifically, at the conclusion of SubsetSym, +// s.At(i, j) equals a.At(set[i], set[j]). Note that the supplied set does not +// have to be a strict subset; dimension repeats are allowed. +func (s *SymDense) SubsetSym(a Symmetric, set []int) { + n := len(set) + na := a.Symmetric() + s.reuseAs(n) + var restore func() + if a == s { + s, restore = s.isolatedWorkspace(a) + defer restore() + } + + if a, ok := a.(RawSymmetricer); ok { + raw := a.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(raw)) + } + for i := 0; i < n; i++ { + ssub := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + r := set[i] + rsub := raw.Data[r*raw.Stride : r*raw.Stride+na] + for j := i; j < n; j++ { + c := set[j] + if r <= c { + ssub[j] = rsub[c] + } else { + ssub[j] = raw.Data[c*raw.Stride+r] + } + } + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = a.At(set[i], set[j]) + } + } +} + +// SliceSquare returns a new Matrix that shares backing data with the receiver. +// The returned matrix starts at {i,i} of the receiver and extends k-i rows +// and columns. The final row and column in the resulting matrix is k-1. +// SliceSquare panics with ErrIndexOutOfRange if the slice is outside the capacity +// of the receiver.
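+// +// A minimal usage sketch (assuming a 4×4 matrix built with NewSymDense from +// this package): +// s := NewSymDense(4, nil) +// v := s.SliceSquare(1, 3) // 2×2 view sharing s's data; v.At(0, 0) == s.At(1, 1)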
+func (s *SymDense) SliceSquare(i, k int) Matrix { + sz := s.cap + if i < 0 || sz < i || k < i || sz < k { + panic(ErrIndexOutOfRange) + } + v := *s + v.mat.Data = s.mat.Data[i*s.mat.Stride+i : (k-1)*s.mat.Stride+k] + v.mat.N = k - i + v.cap = s.cap - i + return &v +} + +// GrowSquare returns the receiver expanded by n rows and n columns. If the +// dimensions of the expanded matrix are outside the capacity of the receiver, +// a new allocation is made, otherwise not. Note that the receiver itself is +// not modified during the call to GrowSquare. +func (s *SymDense) GrowSquare(n int) Matrix { + if n < 0 { + panic(ErrIndexOutOfRange) + } + if n == 0 { + return s + } + var v SymDense + n += s.mat.N + if n > s.cap { + v.mat = blas64.Symmetric{ + N: n, + Stride: n, + Uplo: blas.Upper, + Data: make([]float64, n*n), + } + v.cap = n + // Copy elements, including those not currently visible. Use a temporary + // structure to avoid modifying the receiver. + var tmp SymDense + tmp.mat = blas64.Symmetric{ + N: s.cap, + Stride: s.mat.Stride, + Data: s.mat.Data, + Uplo: s.mat.Uplo, + } + tmp.cap = s.cap + v.CopySym(&tmp) + return &v + } + v.mat = blas64.Symmetric{ + N: n, + Stride: s.mat.Stride, + Uplo: blas.Upper, + Data: s.mat.Data[:(n-1)*s.mat.Stride+n], + } + v.cap = s.cap + return &v +} + +// PowPSD computes a^pow where a is a symmetric positive definite matrix. +// +// PowPSD returns an error if the matrix is not symmetric positive definite +// or the eigendecomposition is not successful. +func (s *SymDense) PowPSD(a Symmetric, pow float64) error { + dim := a.Symmetric() + s.reuseAs(dim) + + var eigen EigenSym + ok := eigen.Factorize(a, true) + if !ok { + return ErrFailedEigen + } + values := eigen.Values(nil) + for i, v := range values { + if v <= 0 { + return ErrNotPSD + } + values[i] = math.Pow(v, pow) + } + var u Dense + u.EigenvectorsSym(&eigen) + + s.SymOuterK(values[0], u.ColView(0)) + + var v VecDense + for i := 1; i < dim; i++ { + v.ColViewOf(&u, i) + s.SymRankOne(s, values[i], &v) + } + return nil +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/triangular.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/triangular.go new file mode 100644 index 00000000..c4446b55 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/triangular.go @@ -0,0 +1,592 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +var ( + triDense *TriDense + _ Matrix = triDense + _ Triangular = triDense + _ RawTriangular = triDense + _ MutableTriangular = triDense + + _ NonZeroDoer = triDense + _ RowNonZeroDoer = triDense + _ ColNonZeroDoer = triDense +) + +const badTriCap = "mat: bad capacity for TriDense" + +// TriDense represents an upper or lower triangular matrix in dense storage +// format. +type TriDense struct { + mat blas64.Triangular + cap int +} + +// Triangular represents a triangular matrix. Triangular matrices are always square. +type Triangular interface { + Matrix + // Triangle returns the number of rows/columns in the matrix and its + // orientation. + Triangle() (n int, kind TriKind) + + // TTri is the equivalent of the T() method in the Matrix interface but + // guarantees the transpose is of triangular type.
+ TTri() Triangular +} + +// A RawTriangular can return a view of itself as a BLAS Triangular matrix. +type RawTriangular interface { + RawTriangular() blas64.Triangular +} + +// A MutableTriangular can set elements of a triangular matrix. +type MutableTriangular interface { + Triangular + SetTri(i, j int, v float64) +} + +var ( + _ Matrix = TransposeTri{} + _ Triangular = TransposeTri{} + _ UntransposeTrier = TransposeTri{} +) + +// TransposeTri is a type for performing an implicit transpose of a Triangular +// matrix. It implements the Triangular interface, returning values from the +// transpose of the matrix within. +type TransposeTri struct { + Triangular Triangular +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Triangular field. +func (t TransposeTri) At(i, j int) float64 { + return t.Triangular.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. Triangular matrices are +// square and thus this is the same size as the original Triangular. +func (t TransposeTri) Dims() (r, c int) { + c, r = t.Triangular.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Triangular field. +func (t TransposeTri) T() Matrix { + return t.Triangular +} + +// Triangle returns the number of rows/columns in the matrix and its orientation. +func (t TransposeTri) Triangle() (int, TriKind) { + n, upper := t.Triangular.Triangle() + return n, !upper +} + +// TTri performs an implicit transpose by returning the Triangular field. +func (t TransposeTri) TTri() Triangular { + return t.Triangular +} + +// Untranspose returns the Triangular field. +func (t TransposeTri) Untranspose() Matrix { + return t.Triangular +} + +// UntransposeTri returns the Triangular field. +func (t TransposeTri) UntransposeTri() Triangular { + return t.Triangular +} + +// NewTriDense creates a new Triangular matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*n, data is +// used as the backing slice, and changes to the elements of the returned TriDense +// will be reflected in data. If neither of these is true, NewTriDense will panic. +// +// The data must be arranged in row-major order, i.e. the (i*n + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +// Only the values in the triangular portion corresponding to kind are used. +func NewTriDense(n int, kind TriKind, data []float64) *TriDense { + if n < 0 { + panic("mat: negative dimension") + } + if data != nil && len(data) != n*n { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*n) + } + uplo := blas.Lower + if kind == Upper { + uplo = blas.Upper + } + return &TriDense{ + mat: blas64.Triangular{ + N: n, + Stride: n, + Data: data, + Uplo: uplo, + Diag: blas.NonUnit, + }, + cap: n, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (t *TriDense) Dims() (r, c int) { + return t.mat.N, t.mat.N +} + +// Triangle returns the dimension of t and its orientation. The returned +// orientation is only valid when n is not zero.
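+// +// A short sketch of the orientation query (Upper is the TriKind constant +// from this package): +// t := NewTriDense(3, Upper, nil) +// n, kind := t.Triangle() // n == 3, kind == Upper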
+func (t *TriDense) Triangle() (n int, kind TriKind) { + return t.mat.N, TriKind(!t.IsZero()) && t.triKind() +} + +func (t *TriDense) isUpper() bool { + return isUpperUplo(t.mat.Uplo) +} + +func (t *TriDense) triKind() TriKind { + return TriKind(isUpperUplo(t.mat.Uplo)) +} + +func isUpperUplo(u blas.Uplo) bool { + switch u { + case blas.Upper: + return true + case blas.Lower: + return false + default: + panic(badTriangle) + } +} + +// asSymBlas returns the receiver restructured as a blas64.Symmetric with the +// same backing memory. Panics if the receiver is unit. +// This returns a blas64.Symmetric and not a *SymDense because SymDense can only +// be upper triangular. +func (t *TriDense) asSymBlas() blas64.Symmetric { + if t.mat.Diag == blas.Unit { + panic("mat: cannot convert unit TriDense into blas64.Symmetric") + } + return blas64.Symmetric{ + N: t.mat.N, + Stride: t.mat.Stride, + Data: t.mat.Data, + Uplo: t.mat.Uplo, + } +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (t *TriDense) T() Matrix { + return Transpose{t} +} + +// TTri performs an implicit transpose by returning the receiver inside a TransposeTri. +func (t *TriDense) TTri() Triangular { + return TransposeTri{t} +} + +// RawTriangular returns the underlying blas64.Triangular used by the receiver. +func (t *TriDense) RawTriangular() blas64.Triangular { + return t.mat +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (t *TriDense) Reset() { + // N and Stride must be zeroed in unison. + t.mat.N, t.mat.Stride = 0, 0 + // Defensively zero Uplo to ensure + // it is set correctly later. + t.mat.Uplo = 0 + t.mat.Data = t.mat.Data[:0] +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. TriDense matrices can be zeroed using Reset. +func (t *TriDense) IsZero() bool { + // It must be the case that t.Dims() returns + // zeros in this case. See comment in Reset(). + return t.mat.Stride == 0 +} + +// untransposeTri untransposes a triangular matrix if applicable. If a is an +// UntransposeTrier, then untransposeTri returns the underlying Triangular and true. +// If it is not, then it returns the input Triangular and false. +func untransposeTri(a Triangular) (Triangular, bool) { + if ut, ok := a.(UntransposeTrier); ok { + return ut.UntransposeTri(), true + } + return a, false +} + +// reuseAs resizes a zero receiver to an n×n triangular matrix with the given +// orientation. If the receiver is non-zero, reuseAs checks that the receiver +// is the correct size and orientation. +func (t *TriDense) reuseAs(n int, kind TriKind) { + if n == 0 { + panic(ErrZeroLength) + } + ul := blas.Lower + if kind == Upper { + ul = blas.Upper + } + if t.mat.N > t.cap { + panic(badTriCap) + } + if t.IsZero() { + t.mat = blas64.Triangular{ + N: n, + Stride: n, + Diag: blas.NonUnit, + Data: use(t.mat.Data, n*n), + Uplo: ul, + } + t.cap = n + return + } + if t.mat.N != n { + panic(ErrShape) + } + if t.mat.Uplo != ul { + panic(ErrTriangle) + } +} + +// isolatedWorkspace returns a new TriDense matrix w with the size of a and +// returns a callback to defer which performs cleanup at the return of the call. +// This should be used when a method receiver is the same pointer as an input argument.
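+// The intended calling pattern is, as a sketch: +// w, restore := t.isolatedWorkspace(a) +// defer restore() +// // ... accumulate the result into w; restore copies it back into t ...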
+func (t *TriDense) isolatedWorkspace(a Triangular) (w *TriDense, restore func()) { + n, kind := a.Triangle() + if n == 0 { + panic(ErrZeroLength) + } + w = getWorkspaceTri(n, kind, false) + return w, func() { + t.Copy(w) + putWorkspaceTri(w) + } +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. Only elements within the +// receiver's non-zero triangle are set. +// +// See the Copier interface for more information. +func (t *TriDense) Copy(a Matrix) (r, c int) { + r, c = a.Dims() + r = min(r, t.mat.N) + c = min(c, t.mat.N) + if r == 0 || c == 0 { + return 0, 0 + } + + switch a := a.(type) { + case RawMatrixer: + amat := a.RawMatrix() + if t.isUpper() { + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) + } + } else { + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) + } + } + case RawTriangular: + amat := a.RawTriangular() + aIsUpper := isUpperUplo(amat.Uplo) + tIsUpper := t.isUpper() + switch { + case tIsUpper && aIsUpper: + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) + } + case !tIsUpper && !aIsUpper: + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) + } + default: + for i := 0; i < r; i++ { + t.set(i, i, amat.Data[i*amat.Stride+i]) + } + } + default: + isUpper := t.isUpper() + for i := 0; i < r; i++ { + if isUpper { + for j := i; j < c; j++ { + t.set(i, j, a.At(i, j)) + } + } else { + for j := 0; j <= i; j++ { + t.set(i, j, a.At(i, j)) + } + } + } + } + + return r, c +} + +// InverseTri computes the inverse of the triangular matrix a, storing the result +// into the receiver. If a is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally be +// avoided where possible, for example by using the Solve routines. +func (t *TriDense) InverseTri(a Triangular) error { + t.checkOverlapMatrix(a) + n, _ := a.Triangle() + t.reuseAs(a.Triangle()) + t.Copy(a) + work := getFloats(3*n, false) + iwork := getInts(n, false) + cond := lapack64.Trcon(CondNorm, t.mat, work, iwork) + putFloats(work) + putInts(iwork) + if math.IsInf(cond, 1) { + return Condition(cond) + } + ok := lapack64.Trtri(t.mat) + if !ok { + return Condition(math.Inf(1)) + } + if cond > ConditionTolerance { + return Condition(cond) + } + return nil +} + +// MulTri takes the product of triangular matrices a and b and places the result +// in the receiver. The size of a and b must match, and they both must have the +// same TriKind, or MulTri will panic. +func (t *TriDense) MulTri(a, b Triangular) { + n, kind := a.Triangle() + nb, kindb := b.Triangle() + if n != nb { + panic(ErrShape) + } + if kind != kindb { + panic(ErrTriangle) + } + + aU, _ := untransposeTri(a) + bU, _ := untransposeTri(b) + t.checkOverlapMatrix(bU) + t.checkOverlapMatrix(aU) + t.reuseAs(n, kind) + var restore func() + if t == aU { + t, restore = t.isolatedWorkspace(aU) + defer restore() + } else if t == bU { + t, restore = t.isolatedWorkspace(bU) + defer restore() + } + + // TODO(btracey): Improve the set of fast-paths.
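+ // Generic path: entry (i, j) of the product of two same-kind triangular + // matrices is the dot product of row i of a and column j of b over the + // indices where both are structurally non-zero: k in [i, j] for Upper and + // k in [j, i] for Lower.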
+ if kind == Upper { + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + var v float64 + for k := i; k <= j; k++ { + v += a.At(i, k) * b.At(k, j) + } + t.SetTri(i, j, v) + } + } + return + } + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + var v float64 + for k := j; k <= i; k++ { + v += a.At(i, k) * b.At(k, j) + } + t.SetTri(i, j, v) + } + } +} + +// ScaleTri multiplies the elements of a by f, placing the result in the receiver. +// If the receiver is non-zero, the size and kind of the receiver must match +// the input, or ScaleTri will panic. +func (t *TriDense) ScaleTri(f float64, a Triangular) { + n, kind := a.Triangle() + t.reuseAs(n, kind) + + // TODO(btracey): Improve the set of fast-paths. + switch a := a.(type) { + case RawTriangular: + amat := a.RawTriangular() + if t != a { + t.checkOverlap(generalFromTriangular(amat)) + } + if kind == Upper { + for i := 0; i < n; i++ { + ts := t.mat.Data[i*t.mat.Stride+i : i*t.mat.Stride+n] + as := amat.Data[i*amat.Stride+i : i*amat.Stride+n] + for i, v := range as { + ts[i] = v * f + } + } + return + } + for i := 0; i < n; i++ { + ts := t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+i+1] + as := amat.Data[i*amat.Stride : i*amat.Stride+i+1] + for i, v := range as { + ts[i] = v * f + } + } + return + default: + t.checkOverlapMatrix(a) + isUpper := kind == Upper + for i := 0; i < n; i++ { + if isUpper { + for j := i; j < n; j++ { + t.set(i, j, f*a.At(i, j)) + } + } else { + for j := 0; j <= i; j++ { + t.set(i, j, f*a.At(i, j)) + } + } + } + } +} + +// copySymIntoTriangle copies a symmetric matrix into a TriDense +func copySymIntoTriangle(t *TriDense, s Symmetric) { + n, upper := t.Triangle() + ns := s.Symmetric() + if n != ns { + panic("mat: triangle size mismatch") + } + ts := t.mat.Stride + if rs, ok := s.(RawSymmetricer); ok { + sd := rs.RawSymmetric() + ss := sd.Stride + if upper { + if sd.Uplo == blas.Upper { + for i := 0; i < n; i++ { + copy(t.mat.Data[i*ts+i:i*ts+n], sd.Data[i*ss+i:i*ss+n]) + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + t.mat.Data[i*ts+j] = sd.Data[j*ss+i] + } + } + return + } + if sd.Uplo == blas.Upper { + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + t.mat.Data[i*ts+j] = sd.Data[j*ss+i] + } + } + return + } + for i := 0; i < n; i++ { + copy(t.mat.Data[i*ts:i*ts+i+1], sd.Data[i*ss:i*ss+i+1]) + } + return + } + if upper { + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + t.mat.Data[i*ts+j] = s.At(i, j) + } + } + return + } + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + t.mat.Data[i*ts+j] = s.At(i, j) + } + } +} + +// DoNonZero calls the function fn for each of the non-zero elements of t. The function fn +// takes a row/column index and the element value of t at (i, j). +func (t *TriDense) DoNonZero(fn func(i, j int, v float64)) { + if t.isUpper() { + for i := 0; i < t.mat.N; i++ { + for j := i; j < t.mat.N; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } + return + } + for i := 0; i < t.mat.N; i++ { + for j := 0; j <= i; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of t. The function fn +// takes a row/column index and the element value of t at (i, j). 
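+// +// A small usage sketch (assuming the standard fmt package); the visitor +// prints the non-zero entries of row 0 within the stored triangle: +// t.DoRowNonZero(0, func(i, j int, v float64) { fmt.Printf("t[%d,%d] = %v\n", i, j, v) })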
+func (t *TriDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || t.mat.N <= i { + panic(ErrRowAccess) + } + if t.isUpper() { + for j := i; j < t.mat.N; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + return + } + for j := 0; j <= i; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of t. The function fn +// takes a row/column index and the element value of t at (i, j). +func (t *TriDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || t.mat.N <= j { + panic(ErrColAccess) + } + if t.isUpper() { + for i := 0; i <= j; i++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + return + } + for i := j; i < t.mat.N; i++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} diff --git a/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/vector.go b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/vector.go new file mode 100644 index 00000000..ca29e503 --- /dev/null +++ b/vendor/k8s.io/code-generator/vendor/gonum.org/v1/gonum/mat/vector.go @@ -0,0 +1,730 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var ( + vector *VecDense + + _ Matrix = vector + _ Vector = vector + _ Reseter = vector +) + +// Vector is a vector. +type Vector interface { + Matrix + AtVec(int) float64 + Len() int +} + +// TransposeVec is a type for performing an implicit transpose of a Vector. +// It implements the Vector interface, returning values from the transpose +// of the vector within. +type TransposeVec struct { + Vector Vector +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Vector field. +func (t TransposeVec) At(i, j int) float64 { + return t.Vector.At(j, i) +} + +// AtVec returns the element at position i. It panics if i is out of bounds. +func (t TransposeVec) AtVec(i int) float64 { + return t.Vector.AtVec(i) +} + +// Dims returns the dimensions of the transposed vector. +func (t TransposeVec) Dims() (r, c int) { + c, r = t.Vector.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Vector field. +func (t TransposeVec) T() Matrix { + return t.Vector +} + +// Len returns the number of columns in the vector. +func (t TransposeVec) Len() int { + return t.Vector.Len() +} + +// TVec performs an implicit transpose by returning the Vector field. +func (t TransposeVec) TVec() Vector { + return t.Vector +} + +// Untranspose returns the Vector field. +func (t TransposeVec) Untranspose() Matrix { + return t.Vector +} + +func (t TransposeVec) UntransposeVec() Vector { + return t.Vector +} + +// VecDense represents a column vector. +type VecDense struct { + mat blas64.Vector + n int + // A BLAS vector can have a negative increment, but allowing this + // in the mat type complicates a lot of code, and doesn't gain anything. + // VecDense must have positive increment in this package. +} + +// NewVecDense creates a new VecDense of length n. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n, data is +// used as the backing slice, and changes to the elements of the returned VecDense +// will be reflected in data. 
If neither of these is true, NewVecDense will panic. +func NewVecDense(n int, data []float64) *VecDense { + if n < 0 { + panic("mat: negative dimension") + } + if len(data) != n && data != nil { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n) + } + return &VecDense{ + mat: blas64.Vector{ + Inc: 1, + Data: data, + }, + n: n, + } +} + +// SliceVec returns a new Vector that shares backing data with the receiver. +// The returned matrix starts at i of the receiver and extends k-i elements. +// SliceVec panics with ErrIndexOutOfRange if the slice is outside the capacity +// of the receiver. +func (v *VecDense) SliceVec(i, k int) Vector { + if i < 0 || k <= i || v.Cap() < k { + panic(ErrIndexOutOfRange) + } + return &VecDense{ + n: k - i, + mat: blas64.Vector{ + Inc: v.mat.Inc, + Data: v.mat.Data[i*v.mat.Inc : (k-1)*v.mat.Inc+1], + }, + } +} + +// Dims returns the number of rows and columns in the matrix. Columns is always 1 +// for a non-Reset vector. +func (v *VecDense) Dims() (r, c int) { + if v.IsZero() { + return 0, 0 + } + return v.n, 1 +} + +// Caps returns the number of rows and columns in the backing matrix. Columns is always 1 +// for a non-Reset vector. +func (v *VecDense) Caps() (r, c int) { + if v.IsZero() { + return 0, 0 + } + return v.Cap(), 1 +} + +// Len returns the length of the vector. +func (v *VecDense) Len() int { + return v.n +} + +// Cap returns the capacity of the vector. +func (v *VecDense) Cap() int { + if v.IsZero() { + return 0 + } + return (cap(v.mat.Data)-1)/v.mat.Inc + 1 +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (v *VecDense) T() Matrix { + return Transpose{v} +} + +// TVec performs an implicit transpose by returning the receiver inside a TransposeVec. +func (v *VecDense) TVec() Vector { + return TransposeVec{v} +} + +// Reset zeros the length of the vector so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (v *VecDense) Reset() { + // No change of Inc or n to 0 may be + // made unless both are set to 0. + v.mat.Inc = 0 + v.n = 0 + v.mat.Data = v.mat.Data[:0] +} + +// CloneVec makes a copy of a into the receiver, overwriting the previous value +// of the receiver. +func (v *VecDense) CloneVec(a Vector) { + if v == a { + return + } + v.n = a.Len() + v.mat = blas64.Vector{ + Inc: 1, + Data: use(v.mat.Data, v.n), + } + if r, ok := a.(RawVectorer); ok { + blas64.Copy(v.n, r.RawVector(), v.mat) + return + } + for i := 0; i < a.Len(); i++ { + v.SetVec(i, a.AtVec(i)) + } +} + +// VecDenseCopyOf returns a newly allocated copy of the elements of a. +func VecDenseCopyOf(a Vector) *VecDense { + v := &VecDense{} + v.CloneVec(a) + return v +} + +func (v *VecDense) RawVector() blas64.Vector { + return v.mat +} + +// CopyVec makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two vectors and +// returns the number of elements it copied. +func (v *VecDense) CopyVec(a Vector) int { + n := min(v.Len(), a.Len()) + if v == a { + return n + } + if r, ok := a.(RawVectorer); ok { + blas64.Copy(n, r.RawVector(), v.mat) + return n + } + for i := 0; i < n; i++ { + v.setVec(i, a.AtVec(i)) + } + return n +} + +// ScaleVec scales the vector a by alpha, placing the result in the receiver. 
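+// +// A minimal sketch: +// var v VecDense +// v.ScaleVec(2, NewVecDense(3, []float64{1, 2, 3})) // v now holds (2, 4, 6)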
+func (v *VecDense) ScaleVec(alpha float64, a Vector) { + n := a.Len() + + if v == a { + if v.mat.Inc == 1 { + f64.ScalUnitary(alpha, v.mat.Data) + return + } + f64.ScalInc(alpha, v.mat.Data, uintptr(n), uintptr(v.mat.Inc)) + return + } + + v.reuseAs(n) + + if rv, ok := a.(RawVectorer); ok { + mat := rv.RawVector() + v.checkOverlap(mat) + if v.mat.Inc == 1 && mat.Inc == 1 { + f64.ScalUnitaryTo(v.mat.Data, alpha, mat.Data) + return + } + f64.ScalIncTo(v.mat.Data, uintptr(v.mat.Inc), + alpha, mat.Data, uintptr(n), uintptr(mat.Inc)) + return + } + + for i := 0; i < n; i++ { + v.setVec(i, alpha*a.AtVec(i)) + } +} + +// AddScaledVec adds the vectors a and alpha*b, placing the result in the receiver. +func (v *VecDense) AddScaledVec(a Vector, alpha float64, b Vector) { + if alpha == 1 { + v.AddVec(a, b) + return + } + if alpha == -1 { + v.SubVec(a, b) + return + } + + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + var amat, bmat blas64.Vector + fast := true + aU, _ := untranspose(a) + if rv, ok := aU.(RawVectorer); ok { + amat = rv.RawVector() + if v != a { + v.checkOverlap(amat) + } + } else { + fast = false + } + bU, _ := untranspose(b) + if rv, ok := bU.(RawVectorer); ok { + bmat = rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + } else { + fast = false + } + + v.reuseAs(ar) + + switch { + case alpha == 0: // v <- a + if v == a { + return + } + v.CopyVec(a) + case v == a && v == b: // v <- v + alpha * v = (alpha + 1) * v + blas64.Scal(ar, alpha+1, v.mat) + case !fast: // v <- a + alpha * b without blas64 support. + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)+alpha*b.AtVec(i)) + } + case v == a && v != b: // v <- v + alpha * b + if v.mat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) + } else { + f64.AxpyInc(alpha, bmat.Data, v.mat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(v.mat.Inc), 0, 0) + } + default: // v <- a + alpha * b or v <- a + alpha * v + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) + } else { + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + alpha, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + } + } +} + +// AddVec adds the vectors a and b, placing the result in the receiver. +func (v *VecDense) AddVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, 1, bmat.Data, amat.Data) + return + } + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + 1, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)+b.AtVec(i)) + } +} + +// SubVec subtracts the vector b from a, placing the result in the receiver. 
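+// +// A minimal sketch, assuming equal-length inputs: +// var v VecDense +// v.SubVec(NewVecDense(2, []float64{3, 4}), NewVecDense(2, []float64{1, 1})) // v == (2, 3)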
+func (v *VecDense) SubVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, -1, bmat.Data, amat.Data) + return + } + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + -1, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)-b.AtVec(i)) + } +} + +// MulElemVec performs element-wise multiplication of a and b, placing the result +// in the receiver. +func (v *VecDense) MulElemVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + for i, a := range amat.Data { + v.mat.Data[i] = a * bmat.Data[i] + } + return + } + var ia, ib int + for i := 0; i < ar; i++ { + v.setVec(i, amat.Data[ia]*bmat.Data[ib]) + ia += amat.Inc + ib += bmat.Inc + } + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)*b.AtVec(i)) + } +} + +// DivElemVec performs element-wise division of a by b, placing the result +// in the receiver. +func (v *VecDense) DivElemVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + for i, a := range amat.Data { + v.setVec(i, a/bmat.Data[i]) + } + return + } + var ia, ib int + for i := 0; i < ar; i++ { + v.setVec(i, amat.Data[ia]/bmat.Data[ib]) + ia += amat.Inc + ib += bmat.Inc + } + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)/b.AtVec(i)) + } +} + +// MulVec computes a * b. The result is stored into the receiver. +// MulVec panics if the number of columns in a does not equal the number of rows in b +// or if the number of columns in b does not equal 1. +func (v *VecDense) MulVec(a Matrix, b Vector) { + r, c := a.Dims() + br, bc := b.Dims() + if c != br || bc != 1 { + panic(ErrShape) + } + + aU, trans := untranspose(a) + var bmat blas64.Vector + fast := true + bU, _ := untranspose(b) + if rv, ok := bU.(RawVectorer); ok { + bmat = rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + } else { + fast = false + } + + v.reuseAs(r) + var restore func() + if v == aU { + v, restore = v.isolatedWorkspace(aU.(*VecDense)) + defer restore() + } else if v == b { + v, restore = v.isolatedWorkspace(b) + defer restore() + } + + // TODO(kortschak): Improve the non-fast paths.
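+ // Dispatch on the concrete type of a: vector operands reduce to a dot + // product or a scale, while symmetric, triangular and general dense + // operands are routed to the corresponding BLAS level-2 kernels (Symv, + // Trmv, Gemv). Anything else falls through to the generic At-based loops.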
+ switch aU := aU.(type) { + case Vector: + if b.Len() == 1 { + // {n,1} x {1,1} + v.ScaleVec(b.AtVec(0), aU) + return + } + + // {1,n} x {n,1} + if fast { + if rv, ok := aU.(RawVectorer); ok { + amat := rv.RawVector() + if v != aU { + v.checkOverlap(amat) + } + + if amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + v.setVec(0, f64.DotUnitary(amat.Data, bmat.Data)) + return + } + v.setVec(0, f64.DotInc(amat.Data, bmat.Data, + uintptr(c), uintptr(amat.Inc), uintptr(bmat.Inc), 0, 0)) + return + } + } + var sum float64 + for i := 0; i < c; i++ { + sum += aU.AtVec(i) * b.AtVec(i) + } + v.setVec(0, sum) + return + case RawSymmetricer: + if fast { + amat := aU.RawSymmetric() + // We don't know that a is a *SymDense, so make + // a temporary SymDense to check overlap. + (&SymDense{mat: amat}).checkOverlap(v.asGeneral()) + blas64.Symv(1, amat, bmat, 0, v.mat) + return + } + case RawTriangular: + v.CopyVec(b) + amat := aU.RawTriangular() + // We don't know that a is a *TriDense, so make + // a temporary TriDense to check overlap. + (&TriDense{mat: amat}).checkOverlap(v.asGeneral()) + ta := blas.NoTrans + if trans { + ta = blas.Trans + } + blas64.Trmv(ta, amat, v.mat) + case RawMatrixer: + if fast { + amat := aU.RawMatrix() + // We don't know that a is a *Dense, so make + // a temporary Dense to check overlap. + (&Dense{mat: amat}).checkOverlap(v.asGeneral()) + t := blas.NoTrans + if trans { + t = blas.Trans + } + blas64.Gemv(t, 1, amat, bmat, 0, v.mat) + return + } + default: + if fast { + for i := 0; i < r; i++ { + var f float64 + for j := 0; j < c; j++ { + f += a.At(i, j) * bmat.Data[j*bmat.Inc] + } + v.setVec(i, f) + } + return + } + } + + for i := 0; i < r; i++ { + var f float64 + for j := 0; j < c; j++ { + f += a.At(i, j) * b.AtVec(j) + } + v.setVec(i, f) + } +} + +// reuseAs resizes an empty vector to a r×1 vector, +// or checks that a non-empty matrix is r×1. +func (v *VecDense) reuseAs(r int) { + if r == 0 { + panic(ErrZeroLength) + } + if v.IsZero() { + v.mat = blas64.Vector{ + Inc: 1, + Data: use(v.mat.Data, r), + } + v.n = r + return + } + if r != v.n { + panic(ErrShape) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized vectors can be the +// receiver for size-restricted operations. VecDenses can be zeroed using Reset. +func (v *VecDense) IsZero() bool { + // It must be the case that v.Dims() returns + // zeros in this case. See comment in Reset(). + return v.mat.Inc == 0 +} + +func (v *VecDense) isolatedWorkspace(a Vector) (n *VecDense, restore func()) { + l := a.Len() + if l == 0 { + panic(ErrZeroLength) + } + n = getWorkspaceVec(l, false) + return n, func() { + v.CopyVec(n) + putWorkspaceVec(n) + } +} + +// asDense returns a Dense representation of the receiver with the same +// underlying data. +func (v *VecDense) asDense() *Dense { + return &Dense{ + mat: v.asGeneral(), + capRows: v.n, + capCols: 1, + } +} + +// asGeneral returns a blas64.General representation of the receiver with the +// same underlying data. +func (v *VecDense) asGeneral() blas64.General { + return blas64.General{ + Rows: v.n, + Cols: 1, + Stride: v.mat.Inc, + Data: v.mat.Data, + } +} + +// ColViewOf reflects the column j of the RawMatrixer m, into the receiver +// backed by the same underlying data. The length of the receiver must either be +// zero or match the number of rows in m. 
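+// +// A minimal sketch, with m any RawMatrixer (for example a *Dense): +// var v VecDense +// v.ColViewOf(m, 0) // v aliases column 0 of m; writes through v are visible in m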
+func (v *VecDense) ColViewOf(m RawMatrixer, j int) { + rm := m.RawMatrix() + + if j >= rm.Cols || j < 0 { + panic(ErrColAccess) + } + if !v.IsZero() && v.n != rm.Rows { + panic(ErrShape) + } + + v.mat.Inc = rm.Stride + v.mat.Data = rm.Data[j : (rm.Rows-1)*rm.Stride+j+1] + v.n = rm.Rows +} + +// RowViewOf reflects the row i of the RawMatrixer m, into the receiver +// backed by the same underlying data. The length of the receiver must either be +// zero or match the number of columns in m. +func (v *VecDense) RowViewOf(m RawMatrixer, i int) { + rm := m.RawMatrix() + + if i >= rm.Rows || i < 0 { + panic(ErrRowAccess) + } + if !v.IsZero() && v.n != rm.Cols { + panic(ErrShape) + } + + v.mat.Inc = 1 + v.mat.Data = rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] + v.n = rm.Cols +}
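+ +// RowViewOf follows the same aliasing pattern as ColViewOf; a minimal sketch: +// var v VecDense +// v.RowViewOf(m, 1) // v aliases row 1 of m with unit increment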