IMAGE_TAG := $(shell echo $(IMAGE_VERSION) | cut -d. -f1,2)
IMAGE_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD | tr -d '/')
-#Currently, we choose golang-alpine based image as the default image base due to its
+#Currently, we choose golang-alpine based image as the default image base due to its
#small size
base:
@IMAGE_NAME="$(REGISTRY)/iec-apiserver-base"; \
./manifest-tool push from-args --platforms $(PLATFORMS) --template $(REGISTRY)/$(TARGET)-ARCH:$(VERSION) --target $(REGISTRY)/$(TARGET):$(VERSION)
-#Note: Currently, just multi-arch the latest version of the images to avoid the possible version
+#Note: Currently, just multi-arch the latest version of the images to avoid the possible version
#mismatch between amd64 and arm64 in docker hub. In case we need them, we can just remove the comment tags below
#Just multi-arch the iec-apiserver image currently
func (_ tApp) Index(
) string {
args := make(map[string]string)
-
+
return revel.MainRouter.Reverse("App.Index", args).URL
}
func (_ tApp) GetStatus(
) string {
args := make(map[string]string)
-
+
return revel.MainRouter.Reverse("App.GetStatus", args).URL
}
filepath string,
) string {
args := make(map[string]string)
-
+
revel.Unbind(args, "prefix", prefix)
revel.Unbind(args, "filepath", filepath)
return revel.MainRouter.Reverse("Static.Serve", args).URL
filepath string,
) string {
args := make(map[string]string)
-
+
revel.Unbind(args, "prefix", prefix)
revel.Unbind(args, "filepath", filepath)
return revel.MainRouter.Reverse("Static.ServeDir", args).URL
filepath string,
) string {
args := make(map[string]string)
-
+
revel.Unbind(args, "moduleName", moduleName)
revel.Unbind(args, "prefix", prefix)
revel.Unbind(args, "filepath", filepath)
filepath string,
) string {
args := make(map[string]string)
-
+
revel.Unbind(args, "moduleName", moduleName)
revel.Unbind(args, "prefix", prefix)
revel.Unbind(args, "filepath", filepath)
func (_ tTestRunner) Index(
) string {
args := make(map[string]string)
-
+
return revel.MainRouter.Reverse("TestRunner.Index", args).URL
}
suite string,
) string {
args := make(map[string]string)
-
+
revel.Unbind(args, "suite", suite)
return revel.MainRouter.Reverse("TestRunner.Suite", args).URL
}
test string,
) string {
args := make(map[string]string)
-
+
revel.Unbind(args, "suite", suite)
revel.Unbind(args, "test", test)
return revel.MainRouter.Reverse("TestRunner.Run", args).URL
func (_ tTestRunner) List(
) string {
args := make(map[string]string)
-
+
return revel.MainRouter.Reverse("TestRunner.List", args).URL
}
// Register all the controllers
func Register() {
revel.AppLog.Info("Running revel server")
-
+
revel.RegisterController((*controllers.App)(nil),
[]*revel.MethodType{
&revel.MethodType{
Name: "Index",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
},
- RenderArgNames: map[int][]string{
- 12: []string{
+ RenderArgNames: map[int][]string{
+ 12: []string{
},
},
},
&revel.MethodType{
Name: "GetStatus",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
},
- RenderArgNames: map[int][]string{
+ RenderArgNames: map[int][]string{
},
},
-
+
})
-
+
revel.RegisterController((*controllers0.Static)(nil),
[]*revel.MethodType{
&revel.MethodType{
Name: "Serve",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
&revel.MethodArg{Name: "prefix", Type: reflect.TypeOf((*string)(nil)) },
&revel.MethodArg{Name: "filepath", Type: reflect.TypeOf((*string)(nil)) },
},
- RenderArgNames: map[int][]string{
+ RenderArgNames: map[int][]string{
},
},
&revel.MethodType{
Name: "ServeDir",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
&revel.MethodArg{Name: "prefix", Type: reflect.TypeOf((*string)(nil)) },
&revel.MethodArg{Name: "filepath", Type: reflect.TypeOf((*string)(nil)) },
},
- RenderArgNames: map[int][]string{
+ RenderArgNames: map[int][]string{
},
},
&revel.MethodType{
Name: "ServeModule",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
&revel.MethodArg{Name: "moduleName", Type: reflect.TypeOf((*string)(nil)) },
&revel.MethodArg{Name: "prefix", Type: reflect.TypeOf((*string)(nil)) },
&revel.MethodArg{Name: "filepath", Type: reflect.TypeOf((*string)(nil)) },
},
- RenderArgNames: map[int][]string{
+ RenderArgNames: map[int][]string{
},
},
&revel.MethodType{
Name: "ServeModuleDir",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
&revel.MethodArg{Name: "moduleName", Type: reflect.TypeOf((*string)(nil)) },
&revel.MethodArg{Name: "prefix", Type: reflect.TypeOf((*string)(nil)) },
&revel.MethodArg{Name: "filepath", Type: reflect.TypeOf((*string)(nil)) },
},
- RenderArgNames: map[int][]string{
+ RenderArgNames: map[int][]string{
},
},
-
+
})
-
+
revel.RegisterController((*controllers1.TestRunner)(nil),
[]*revel.MethodType{
&revel.MethodType{
Name: "Index",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
},
- RenderArgNames: map[int][]string{
- 76: []string{
+ RenderArgNames: map[int][]string{
+ 76: []string{
"testSuites",
},
},
},
&revel.MethodType{
Name: "Suite",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
&revel.MethodArg{Name: "suite", Type: reflect.TypeOf((*string)(nil)) },
},
- RenderArgNames: map[int][]string{
+ RenderArgNames: map[int][]string{
},
},
&revel.MethodType{
Name: "Run",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
&revel.MethodArg{Name: "suite", Type: reflect.TypeOf((*string)(nil)) },
&revel.MethodArg{Name: "test", Type: reflect.TypeOf((*string)(nil)) },
},
- RenderArgNames: map[int][]string{
- 125: []string{
+ RenderArgNames: map[int][]string{
+ 125: []string{
},
},
},
&revel.MethodType{
Name: "List",
- Args: []*revel.MethodArg{
+ Args: []*revel.MethodArg{
},
- RenderArgNames: map[int][]string{
+ RenderArgNames: map[int][]string{
},
},
-
+
})
-
- revel.DefaultValidationKeys = map[string]map[int]string{
+
+ revel.DefaultValidationKeys = map[string]map[int]string{
}
- testing.TestSuites = []interface{}{
+ testing.TestSuites = []interface{}{
(*tests.AppTest)(nil),
}
}
# sending data before the entire template has been fully rendered.
results.chunked = false
-# Compression of your HTML and CSS files with gzip typically saves around
-# fifty to seventy percent of the file size. This means that it takes less
-# time to load your pages, and less bandwidth is used overall.
+# Compression of your HTML and CSS files with gzip typically saves around
+# fifty to seventy percent of the file size. This means that it takes less
+# time to load your pages, and less bandwidth is used overall.
# To enable compression, set value to true.
results.compressed = false
# Watch your application files for changes and automatically rebuild
# Values:
# "true"
-# Enables auto rebuilding.
+# Enables auto rebuilding.
# "false"
# Disables auto rebuilding.
watch = true
# Maintenance Release
-This release is focused on improving the security and resolving some issues.
+This release is focused on improving the security and resolving some issues.
**There are no breaking changes from version 0.18**
## New items
* Server Engine revel/revel#998
The server engine implementation is described in the [docs](http://revel.github.io/manual/server-engine.html)
-* Allow binding to a structured map. revel/revel#998
+* Allow binding to a structured map. revel/revel#998
A structure nested inside a map object will now be populated properly from params
* Gorm module revel/modules/#51
Added transaction controller
Start the application without doing a request first using revel run ....
* Logger update revel/revel#1213
Configurable logger and added context logging on controller via controller.Log
-* Before after finally panic controller method detection revel/revel#1211
+* Before after finally panic controller method detection revel/revel#1211
Controller methods will be automatically detected and called - similar to interceptors but without the extra code
* Float validation revel/revel#1209
Added validation for floats
Added ability to specify authorization to access the jobs module routes
* Add MessageKey, ErrorKey methods to ValidationResult object revel/revel#1215
This allows the message translator to translate the keys added. So model objects can send out validation codes
-* Vendor friendlier - Revel recognizes and uses `deps` (to checkout go libraries) if a vendor folder exists in the project root.
+* Vendor friendlier - Revel recognizes and uses `deps` (to checkout go libraries) if a vendor folder exists in the project root.
* Updated examples to use Gorp modules and new loggers
### Breaking Changes
-* `http.Request` is no longer contained in `revel.Request`. `revel.Request` remains functionally the same but
+* `http.Request` is no longer contained in `revel.Request`. `revel.Request` remains functionally the same but
you cannot extract the `http.Request` from it. You can get the `http.Request` from `revel.Controller.Request.In.GetRaw().(*http.Request)`
* `http.Response.Out` is not the `http.Response` and is deprecated; you can get the output writer by doing `http.Response.GetWriter()`. You can get the `http.Response` from `revel.Controller.Response.Out.Server.GetRaw().(*http.Response)`
-* `Websocket` changes. `revel.ServerWebsocket` is the new type of object you need to declare for controllers
+* `Websocket` changes. `revel.ServerWebsocket` is the new type of object you need to declare for controllers
which need to attach to websockets. The implementation of these objects has been simplified
Old
}
}()
```
-* GORM module has been refactored into modules/orm/gorm
+* GORM module has been refactored into modules/orm/gorm
### Deprecated methods
* Pluggable server engine support. You can now implement **your own server engine**. This means if you need to listen to more than one IP address or port, you can implement a custom server engine to do this. By default Revel uses the Go HTTP server, but a fasthttp server is also available in the revel/modules repository. See the docs for more information on how to implement your own engine.
### Enhancements
-* Controller instances are cached for reuse. This speeds up the request response time and prevents unnecessary garbage collection cycles.
+* Controller instances are cached for reuse. This speeds up the request response time and prevents unnecessary garbage collection cycles.
### Bug fixes
### Features
-* Modular Template Engine #1170
+* Modular Template Engine #1170
* Pongo2 engine driver added revel/modules#39
* Ace engine driver added revel/modules#40
-* Added i18n template support #746
+* Added i18n template support #746
### Enhancements
-* JSON request binding #1161
-* revel.SetSecretKey function added #1127
-* ResolveFormat now looks at the extension as well (this sets the content type) #936
+* JSON request binding #1161
+* revel.SetSecretKey function added #1127
+* ResolveFormat now looks at the extension as well (this sets the content type) #936
* Updated command to run tests using the configuration revel/cmd#61
### Bug fixes
* Updated documentation typos revel/modules#37
-* Updated order of parameter map assignment #1155
-* Updated cookie lifetime for firefox #1174
-* Added test path for modules, so modules will run tests as well #1162
+* Updated order of parameter map assignment #1155
+* Updated cookie lifetime for firefox #1174
+* Added test path for modules, so modules will run tests as well #1162
* Fixed go profiler module revel/modules#20
# Revel Framework
-[](http://travis-ci.org/revel/revel)
+[](http://travis-ci.org/revel/revel)
[](LICENSE)
[](https://goreportcard.com/report/github.com/revel/revel)
-greeting=Hallo
+greeting=Hallo
greeting.name=Rob
greeting.suffix=, welkom bij Revel!
-greeting=Hello
+greeting=Hello
greeting.name=Rob
greeting.suffix=, welcome to Revel!
provisioner "local-exec" {
command = <<EOT
- touch token
+ touch token
mkdir /root/.ssh && chmod 0700 /root/.ssh
ssh-keyscan -H ${self.public_dns} >> ~/.ssh/known_hosts
scp -i terraform.pem ubuntu@${self.public_dns}:/microk8s.join_token .
-------------
1. Install terraform - https://www.terraform.io/downloads.html
-   (a)Download the zip file based on the server type.
-   (b)Unzip the file to get the terraform binary.
+   (a)Download the zip file based on the server type.
+   (b)Unzip the file to get the terraform binary.
(c)Currently supported ubuntu version is 18.04
2. IAM Access Keys - Permissions required for running the template - AmazonEC2FullAccess
Terraform template
------------------
-The template contains main.tf file, variable.tf file, pem file (add your pem file here) and worker_user_data.tmpl
+The template contains main.tf file, variable.tf file, pem file (add your pem file here) and worker_user_data.tmpl
You can move the pem file to the directory where this template resides or you can change the location of the pem file in the main.tf file.
-
+
Master's main.tf file
--------------------
The first step to using Terraform is typically to configure the provider(s) you want to use.
region = var.aws_region
}
-The user_data installs the microk8s inside the EC2 instance.
+The user_data installs the microk8s inside the EC2 instance.
#!/bin/bash
sudo su
microk8s add-node > microk8s.join_token
microk8s config > configFile
-Since terraform does not wait until the user_data is executed, we exec into the instance by using the 'remote-exec' type provisioner and add the following script. This script will make terraform wait until the microk8s.join_token file is created.
+Since terraform does not wait until the user_data is executed, we exec into the instance by using the 'remote-exec' type provisioner and add the following script. This script will make terraform wait until the microk8s.join_token file is created.
provisioner "remote-exec" {
inline = ["until [ -f /microk8s.join_token ]; do sleep 5; done; cat /microk8s.join_token"]
description = "instance_type"
default = "t2.small"
}
-Other resource specific values like aws_region, aws_ami, vpc and the subnet can also be changed the same way by editing the variable.tf file.
+Other resource specific values like aws_region, aws_ami, vpc and the subnet can also be changed the same way by editing the variable.tf file.
Apply terraform
---------------
-To create a master node with microk8s, run the following commands.
+To create a master node with microk8s, run the following commands.
terraform init
terraform plan
terraform apply
-Once the worker nodes are created, they will be connected to the master. A multi-node k8s cluster will be provisioned with calico CNI.
+Once the worker nodes are created, they will be connected to the master. A multi-node k8s cluster will be provisioned with calico CNI.
resources:
- endpointslices
verbs:
- - watch
+ - watch
- list
- apiGroups: [""]
resources:
spec:
selector:
matchLabels:
- name: sriov-cni
+ name: sriov-cni
template:
metadata:
labels:
# Create the service account and policies.
# ovnkube interacts with kubernetes and the environment
# must be properly set up.
-#
+#
# This provisioning is done as part of installation after the cluster is
# up and before the ovn daemonsets are created.
protocol: TCP
targetPort: 9409
---
-
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
port: 9410
protocol: TCP
targetPort: 9410
-
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
capabilities:
add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
-
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# for the iptables wrapper
name: host-ovn-cert
readOnly: true
-
resources:
requests:
cpu: 100m
hostPath:
path: /etc/origin/openvswitch
-
tolerations:
- operator: "Exists"
release: istio\r
istio: galley\r
data:\r
- validatingwebhookconfiguration.yaml: |- \r
+ validatingwebhookconfiguration.yaml: |-\r
apiVersion: admissionregistration.k8s.io/v1beta1\r
kind: ValidatingWebhookConfiguration\r
metadata:\r
release: istio\r
istio: grafana\r
data:\r
- custom-resources.yaml: |- \r
+ custom-resources.yaml: |-\r
apiVersion: authentication.istio.io/v1alpha1\r
kind: Policy\r
metadata:\r
- name: grafana\r
ports:\r
- number: 3000\r
- run.sh: |- \r
+ run.sh: |-\r
#!/bin/sh\r
- \r
+\r
set -x\r
- \r
+\r
if [ "$#" -ne "1" ]; then\r
echo "first argument should be path to custom resource yaml"\r
exit 1\r
fi\r
- \r
+\r
pathToResourceYAML=${1}\r
- \r
+\r
kubectl get validatingwebhookconfiguration istio-galley 2>/dev/null\r
if [ "$?" -eq 0 ]; then\r
echo "istio-galley validatingwebhookconfiguration found - waiting for istio-galley deployment to be ready"\r
fi\r
sleep 5\r
kubectl apply -f ${pathToResourceYAML}\r
- \r
+\r
\r
---\r
# Source: istio/charts/grafana/templates/configmap-dashboards.yaml\r
orgId: 1\r
type: prometheus\r
url: http://prometheus:9090\r
- \r
+\r
dashboardproviders.yaml: |\r
apiVersion: 1\r
providers:\r
path: /var/lib/grafana/dashboards/istio\r
orgId: 1\r
type: file\r
- \r
+\r
---\r
# Source: istio/charts/kiali/templates/configmap.yaml\r
apiVersion: v1\r
web_root: /kiali\r
external_services:\r
tracing:\r
- url: \r
+ url:\r
grafana:\r
- url: \r
+ url:\r
prometheus:\r
url: http://prometheus:9090\r
\r
sdsUdsPath:\r
\r
# This flag is used by secret discovery service(SDS).\r
- # If set to true(prerequisite: https://kubernetes.io/docs/concepts/storage/volumes/#projected), Istio will inject volumes mount \r
- # for k8s service account JWT, so that K8s API server mounts k8s service account JWT to envoy container, which \r
+ # If set to true(prerequisite: https://kubernetes.io/docs/concepts/storage/volumes/#projected), Istio will inject volumes mount\r
+ # for k8s service account JWT, so that K8s API server mounts k8s service account JWT to envoy container, which\r
# will be used to generate key/cert eventually. This isn't supported for non-k8s case.\r
enableSdsTokenMount: false\r
\r
# This flag is used by secret discovery service(SDS).\r
- # If set to true, envoy will fetch normal k8s service account JWT from '/var/run/secrets/kubernetes.io/serviceaccount/token' \r
- # (https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod) \r
+ # If set to true, envoy will fetch normal k8s service account JWT from '/var/run/secrets/kubernetes.io/serviceaccount/token'\r
+ # (https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod)\r
# and pass to sds server, which will be used to request key/cert eventually.\r
# this flag is ignored if enableSdsTokenMount is set.\r
# This isn't supported for non-k8s case.\r
resources:\r
requests:\r
cpu: 10m\r
- \r
+\r
volumes:\r
- name: certs\r
secret:\r
resources:\r
requests:\r
cpu: 10m\r
- \r
+\r
volumes:\r
- name: kiali-configuration\r
configMap:\r
requests:\r
cpu: 10m\r
memory: 100Mi\r
- \r
+\r
volumeMounts:\r
- name: istio-certs\r
mountPath: /etc/certs\r
requests:\r
cpu: 10m\r
memory: 40Mi\r
- \r
+\r
volumeMounts:\r
- name: istio-certs\r
mountPath: /etc/certs\r
requests:\r
cpu: 10m\r
memory: 40Mi\r
- \r
+\r
volumeMounts:\r
- name: istio-certs\r
mountPath: /etc/certs\r
requests:\r
cpu: 10m\r
memory: 100Mi\r
- \r
+\r
volumeMounts:\r
- name: config-volume\r
mountPath: /etc/istio/config\r
resources:\r
requests:\r
cpu: 10m\r
- \r
+\r
volumeMounts:\r
- name: config-volume\r
mountPath: /etc/prometheus\r
resources:\r
requests:\r
cpu: 10m\r
- \r
+\r
affinity:\r
nodeAffinity:\r
requiredDuringSchedulingIgnoredDuringExecution:\r
resources:\r
requests:\r
cpu: 10m\r
- \r
+\r
volumes:\r
- name: config-volume\r
configMap:\r
- name: MEMORY_MAX_TRACES\r
value: "50000"\r
- name: QUERY_BASE_PATH\r
- value: /jaeger \r
+ value: /jaeger\r
livenessProbe:\r
httpGet:\r
path: /\r
resources:\r
requests:\r
cpu: 10m\r
- \r
+\r
affinity:\r
nodeAffinity:\r
requiredDuringSchedulingIgnoredDuringExecution:\r
urlIP=${1:-172.26.12.101}
urlPort=${2:-30942}
-
+
IFS_OLD=$IFS
IFS=$'\n'
for (( c=$START; c<=$END; c++))
do
-
+
#curl -w "%{time_total}\n" -o /dev/null -s http://$svcIP
cmd=$1
echo "Round:"$c"--"$cmd
sleep 0.1
done
-done
+done
echo "Latency:"
for (( c=$START; c<=$END; c++))
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
-
+
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
-
+
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
BufferData();
BufferData(GLsizeiptr size, void * data);
GLsizeiptr m_size;
- FixedBuffer m_fixedBuffer;
+ FixedBuffer m_fixedBuffer;
};
class ProgramData {
bool isProgram(GLuint program);
bool isProgramInitialized(GLuint program);
- void addProgramData(GLuint program);
+ void addProgramData(GLuint program);
void initProgramData(GLuint program, GLuint numIndexes);
void attachShader(GLuint program, GLuint shader);
void detachShader(GLuint program, GLuint shader);
void unrefShaderData(GLuint shader);
};
-typedef SmartPtr<GLSharedGroup> GLSharedGroupPtr;
+typedef SmartPtr<GLSharedGroup> GLSharedGroupPtr;
#endif //_GL_SHARED_GROUP_H_
glBufferData_client_proc_t m_glBufferData_enc;
glBufferSubData_client_proc_t m_glBufferSubData_enc;
glDeleteBuffers_client_proc_t m_glDeleteBuffers_enc;
-
+
glEnableClientState_client_proc_t m_glEnableClientState_enc;
glDisableClientState_client_proc_t m_glDisableClientState_enc;
glIsEnabled_client_proc_t m_glIsEnabled_enc;
#include "gl_types.h"
#ifndef gl_APIENTRY
-#define gl_APIENTRY
+#define gl_APIENTRY
#endif
typedef void (gl_APIENTRY *glAlphaFunc_client_proc_t) (void * ctx, GLenum, GLclampf);
typedef void (gl_APIENTRY *glClearColor_client_proc_t) (void * ctx, GLclampf, GLclampf, GLclampf, GLclampf);
#include "gl2_types.h"
#ifndef gl2_APIENTRY
-#define gl2_APIENTRY
+#define gl2_APIENTRY
#endif
typedef void (gl2_APIENTRY *glActiveTexture_client_proc_t) (void * ctx, GLenum);
typedef void (gl2_APIENTRY *glAttachShader_client_proc_t) (void * ctx, GLuint, GLuint);
} s_client_ext_funcs[] = {
#include "ClientAPIExts.in"
};
-static const int numExtFuncs = sizeof(s_client_ext_funcs) /
+static const int numExtFuncs = sizeof(s_client_ext_funcs) /
sizeof(s_client_ext_funcs[0]);
#undef API_ENTRY
with their attribute values.
bufSize is the size of the buffer; it should be at least equal to
(numConfigs + 1) * numAttribs * sizeof(GLuint)
- where numConfigs and numAttribs are the values returned in
+ where numConfigs and numAttribs are the values returned in
rcGetNumConfigs. If bufSize is not big enough, then the negative number
of required bytes is returned; otherwise the function returns the number
of configs and the buffer is filled as follows: The first 'numAttribs'
in the same order as the attribute vector.
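
A minimal sketch of sizing and filling that buffer, assuming the call being
described here is rcGetConfigs and that both calls are reachable from the
current renderControl encoder context (dispatch details and error handling
omitted; requires <vector> and the EGL/GL type headers):

    // Assumed prototypes for the two calls discussed above.
    EGLint rcGetNumConfigs(uint32_t* numAttribs);
    EGLint rcGetConfigs(uint32_t bufSize, GLuint* buffer);

    uint32_t numAttribs = 0;
    EGLint numConfigs = rcGetNumConfigs(&numAttribs);
    // One extra row holds the attribute names, hence (numConfigs + 1).
    uint32_t bufSize = (numConfigs + 1) * numAttribs * sizeof(GLuint);
    std::vector<GLuint> buffer(bufSize / sizeof(GLuint));
    EGLint n = rcGetConfigs(bufSize, buffer.data());
    if (n < 0) {
        // -n is the number of bytes actually required.
    }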
EGLint rcChooseConfig(EGLint *attribs, uint32_t attribs_size, uint32_t *configs, uint32_t configs_size)
- This function triggers an eglChooseConfig on the host, to get a list of
+ This function triggers an eglChooseConfig on the host, to get a list of
configs matching the given attribs values.
attribs - a list of attribute names followed by the desired values, terminated by EGL_NONE
attribs_size - the size of the list
configs - the returned matching configuration names (same names as familiar to the client in rcGetConfigs)
configs_size - the size of the configs buffers
- returns - the actual number of matching configurations (<= configs_size)
+ returns - the actual number of matching configurations (<= configs_size)
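
A hedged usage sketch of rcChooseConfig as specified above; the attribute
names come from standard EGL, and attribs_size is assumed to be the byte
size of the attribute list:

    EGLint attribs[] = { EGL_RED_SIZE, 8, EGL_DEPTH_SIZE, 24, EGL_NONE };
    uint32_t configs[16];
    EGLint matching = rcChooseConfig(attribs, sizeof(attribs),
                                     configs, 16 /* configs_size */);
    // 'matching' is the number of configurations found (<= configs_size);
    // the handles in 'configs' use the same names returned by rcGetConfigs.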
EGLint rcGetFBParam(EGLint param);
queries the host for framebuffer parameter, see renderControl_types.h
uint32_t rcCreateColorBuffer(uint32_t width, uint32_t height, GLenum internalFormat);
This function creates a colorBuffer object on the host which can then
- be specified as a render target for a window surface through
+ be specified as a render target for a window surface through
rcSetWindowColorBuffer or to be displayed on the framebuffer window
through rcFBPost.
The function returns a handle to the colorBuffer object, with an initial
colorBuffer.
EGLint rcMakeCurrent(uint32_t context, uint32_t drawSurf, uint32_t readSurf);
- Binds a windowSurface(s) and current rendering context for the
+ Binds a windowSurface(s) and current rendering context for the
calling thread.
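
Putting the calls above together, a hedged sketch of attaching a new
colorBuffer to a window surface and making it current; 'context' and
'winSurface' are handles assumed to have been created through the
protocol's corresponding create calls (not shown in this excerpt):

    uint32_t colorBuffer = rcCreateColorBuffer(640, 480, GL_RGBA);
    rcSetWindowColorBuffer(winSurface, colorBuffer);   // render target
    rcMakeCurrent(context, winSurface /* draw */, winSurface /* read */);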
void rcFBPost(uint32_t colorBuffer);
with non-zero 'forRead' value, otherwise the function returns zero or
negative value on failure.
-void rcReadColorBuffer(uint32_t colorbuffer, GLint x, GLint y,
- GLint width, GLint height, GLenum format,
+void rcReadColorBuffer(uint32_t colorbuffer, GLint x, GLint y,
+ GLint width, GLint height, GLenum format,
GLenum type, void* pixels);
This function queries the host for the pixel content of a colorBuffer's
subregion. It acts the same as OpenGL glReadPixels, except that pixels
are always packed with alignment of 1.
-void rcUpdateColorBuffer(uint32_t colorbuffer, GLint x, GLint y,
- GLint width, GLint height, GLenum format,
+void rcUpdateColorBuffer(uint32_t colorbuffer, GLint x, GLint y,
+ GLint width, GLint height, GLenum format,
GLenum type, void* pixels);
Updates the content of a subregion of a colorBuffer object.
pixels are always unpacked with alignment of 1.
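
A short sketch of the read-back/update pair described above; because pixels
are packed and unpacked with an alignment of 1, the buffer size is exactly
width * height * bytes-per-pixel (4 for GL_RGBA / GL_UNSIGNED_BYTE):

    std::vector<unsigned char> pixels(64 * 64 * 4);
    rcReadColorBuffer(colorBuffer, 0, 0, 64, 64,
                      GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
    // ... modify the pixel data ...
    rcUpdateColorBuffer(colorBuffer, 0, 0, 64, 64,
                        GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());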
#include "renderControl_types.h"
#ifndef renderControl_APIENTRY
-#define renderControl_APIENTRY
+#define renderControl_APIENTRY
#endif
typedef GLint (renderControl_APIENTRY *rcGetRendererVersion_client_proc_t) (void * ctx);
typedef EGLint (renderControl_APIENTRY *rcGetEGLVersion_client_proc_t) (void * ctx, EGLint*, EGLint*);
events[ID_TEMPERATURE].type = SENSOR_TYPE_TEMPERATURE;
continue;
}
-
+
/* "proximity:<value>" */
if (sscanf(buff, "proximity:%g", params+0) == 1) {
new_sensors |= SENSORS_PROXIMITY;
NUM_JOBS=5
_do_update_chroot=0
-# Default to vivid as we don't seem to have any working wily devices right now
+# Default to vivid as we don't seem to have any working wily devices right now
dist=vivid
clean=0
update_build_dir=0
export AC_NDK_PATH=~/.cache/anbox-${target_arch}-chroot-${dist}
fi
-if [ ! -d ${AC_NDK_PATH} ]; then
+if [ ! -d ${AC_NDK_PATH} ]; then
echo "no partial chroot dir detected. attempting to create one"
_do_update_chroot=1
fi
-if [ ! -d ${BUILD_DIR} ]; then
+if [ ! -d ${BUILD_DIR} ]; then
mkdir ${BUILD_DIR}
fi
| | |
- - - | - - - - - - - - - | - - - - - - - - - | - - - - -
| | |
- ____v____ ____v_____ _____v____ HOST
+ ____v____ ____v_____ _____v____ HOST
| | | | | | SYSTEM
| GLX | | GL 2.0 | | GL 2.0 | LIBRARIES
|_________| |__________| |__________|
tests/ut_rendercontrol_dec -> host library used by tests/ut_renderer
tests/ut_renderer -> unit-test for render control and rendering library.
-
+
II. Build system considerations:
--------------------------------
GLOBAL
base_opcode 1024
encoder_headers "glUtils.h" "GLEncoderUtils.h"
-
+
#void glClipPlanef(GLenum plane, GLfloat *equation)
glClipPlanef
dir equation in
#void glDeleteBuffers(GLsizei n, GLuint *buffers)
glDeleteBuffers
len buffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glDeleteTextures(GLsizei n, GLuint *textures)
glDeleteTextures
len textures (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#this function is marked as unsupported - it shouldn't be called directly
#instead it translated into - glDrawDirectElements and glDrawIndirectElements
glGenBuffers
len buffers (n * sizeof(GLuint))
dir buffers out
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenTextures(GLsizei n, GLuint *textures)
glGenTextures
len textures (n * sizeof(GLuint))
dir textures out
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGetFixedv(GLenum pname, GLfixed *params)
glGetFixedv
glDeleteRenderbuffersOES
dir renderbuffers in
len renderbuffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenRenderbuffersOES(GLsizei n, GLuint *renderbuffers)
glGenRenderbuffersOES
- dir renderbuffers out
+ dir renderbuffers out
len renderbuffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGetRenderbufferParameterivOES(GLenum target, GLenum pname, GLint *params)
glGetRenderbufferParameterivOES
glDeleteFramebuffersOES
dir framebuffers in
len framebuffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenFramebuffersOES(GLsizei n, GLuint *framebuffers)
glGenFramebuffersOES
dir framebuffers out
len framebuffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGetFramebufferAttachmentParameterivOES(GLenum target, GLenum attachment, GLenum pname, GLint *params)
glGetFramebufferAttachmentParameterivOES
glDeleteVertexArraysOES
dir arrays in
len arrays (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenVertexArraysOES(GLsizei n, GLuint *arrays)
glGenVertexArraysOES
dir arrays out
len arrays (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glDiscardFramebufferEXT(GLenum target, GLsizei numAttachments, const GLenum *attachments)
glDiscardFramebufferEXT
#void glMultiDrawArraysEXT(GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount)
glMultiDrawArraysEXT
flag unsupported
-
+
#void glMultiDrawElementsEXT(GLenum mode, const GLsizei *count, GLenum type, const GLvoid* const *indices, GLsizei primcount)
glMultiDrawElementsEXT
flag unsupported
#void glMultiDrawArraysSUN(GLenum mode, GLint *first, GLsizei *count, GLsizei primcount)
glMultiDrawArraysSUN
flag unsupported
-
+
#void glMultiDrawElementsSUN(GLenum mode, const GLsizei *count, GLenum type, const GLvoid* *indices, GLsizei primcount)
glMultiDrawElementsSUN
flag unsupported
glDeleteFencesNV
dir fences in
len fences (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenFencesNV(GLsizei n, GLuint *fences)
glGenFencesNV
dir fences in
len fences (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGetFenceivNV(GLuint fence, GLenum pname, GLint *params)
glGetFenceivNV
#void glDeleteBuffers(GLsizei n, GLuint *buffers)
glDeleteBuffers
len buffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glDeleteFramebuffers(GLsizei n, GLuint *framebuffers)
glDeleteFramebuffers
len framebuffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glDeleteRenderbuffers(GLsizei n, GLuint *renderbuffers)
glDeleteRenderbuffers
len renderbuffers (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glDeleteTextures(GLsizei n, GLuint *textures)
glDeleteTextures
len textures (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glDrawElements(GLenum mode, GLsizei count, GLenum type, GLvoid *indices)
glDrawElements
glGenBuffers
len buffers (n * sizeof(GLuint))
dir buffers out
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenFramebuffers(GLsizei n, GLuint *framebuffers)
glGenFramebuffers
len framebuffers (n * sizeof(GLuint))
dir framebuffers out
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenRenderbuffers(GLsizei n, GLuint *renderbuffers)
glGenRenderbuffers
len renderbuffers (n * sizeof(GLuint))
dir renderbuffers out
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenTextures(GLsizei n, GLuint *textures)
glGenTextures
len textures (n * sizeof(GLuint))
dir textures out
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGetActiveAttrib(GLuint program, GLuint index, GLsizei bufsize, GLsizei *length, GLint *size, GLenum *type, GLchar *name)
glGetActiveAttrib
dir type out
len type (sizeof(GLenum))
var_flag type nullAllowed
-
+
#void glGetActiveUniform(GLuint program, GLuint index, GLsizei bufsize, GLsizei *length, GLint *size, GLenum *type, GLchar *name)
glGetActiveUniform
len name bufsize
var_flag length nullAllowed
dir infolog out
len infolog bufsize
-
+
#void glGetShaderPrecisionFormat(GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision)
glGetShaderPrecisionFormat
# client-state shall be handled locally by the encoder in most cases.
# however, GL_CURRENT_VERTEX_ATTRIB and potential others are handled by the server side,
-# thus we still need to implement it.
+# thus we still need to implement it.
#void glGetVertexAttribfv(GLuint index, GLenum pname, GLfloat *params)
glGetVertexAttribfv
dir params out
glTexSubImage2D
len pixels glesv2_enc::pixelDataSize(self, width, height, format, type, 0)
var_flag pixels nullAllowed isLarge
-
+
#void glUniform1fv(GLint location, GLsizei count, GLfloat *v)
glUniform1fv
len v (count * sizeof(GLfloat))
#void glUniformMatrix3fv(GLint location, GLsizei count, GLboolean transpose, GLfloat *value)
glUniformMatrix3fv
len value (count * 9 * sizeof(GLfloat))
-
+
#void glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpose, GLfloat *value)
glUniformMatrix4fv
len value (count * 16 * sizeof(GLfloat))
#void glTexSubImage3DOES(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, GLvoid *pixels)
glTexSubImage3DOES
- len pixels glesv2_enc::pixelDataSize3D(self, width, height, depth, format, type, 0)
+ len pixels glesv2_enc::pixelDataSize3D(self, width, height, depth, format, type, 0)
var_flag pixels nullAllowed isLarge
#void glCompressedTexImage3DOES(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, GLvoid *data)
#void glDeleteVertexArraysOES(GLsizei n, GLuint *arrays)
glDeleteVertexArraysOES
len arrays (n * sizeof(GLuint))
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glGenVertexArraysOES(GLsizei n, GLuint *arrays)
glGenVertexArraysOES
len arrays (n * sizeof(GLuint))
dir arrays out
- param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
+ param_check n if(n<0){ ctx->setError(GL_INVALID_VALUE); return; }
#void glDiscardFramebufferEXT(GLenum target, GLsizei numAttachments, GLenum *attachments)
#void glVertexAttribPointerData(GLuint indx, GLint size, GLenum type, GLboolean normalized, GLsizei stride, void * data, GLuint datalen)
glVertexAttribPointerData
len data datalen
- custom_pack data glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, size, type, stride, datalen)
+ custom_pack data glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, size, type, stride, datalen)
flag custom_decoder
flag not_api
len string len
flag custom_decoder
flag not_api
-
+
glFinishRoundTrip
flag custom_decoder
flag not_api
for (size_t i = 0; i < n; i++) {
EntryPoint *e = &at(i);
if (e->unsupported()) {
- fprintf(fp,
+ fprintf(fp,
"\tthis->%s = (%s_%s_proc_t) &enc_unsupported;\n",
e->name().c_str(),
e->name().c_str(),
fprintf(fp, "struct %s : public %s_%s_context_t {\n\n",
classname.c_str(), m_basename.c_str(), sideString(SERVER_SIDE));
fprintf(fp, "\tsize_t decode(void *buf, size_t bufsize, IOStream *stream);\n");
- if (strcmp(classname.c_str(), "gles2_decoder_context_t") == 0){
- fprintf(fp,
+ if (strcmp(classname.c_str(), "gles2_decoder_context_t") == 0){
+ fprintf(fp,
"\tvoid freeShader(); \n\
\tvoid freeProgram(); \n\
\tstd::map<GLuint, GLuint> m_programs; \n\
// glsl shader/program free;
if (strcmp(classname.c_str(), "gles2_decoder_context_t") == 0) {
fprintf(fp, "void %s::freeShader(){\n", classname.c_str());
- fprintf(fp,
+ fprintf(fp,
" \n\
\tauto it = m_shaders.begin();\n\
\tm_lock.lock();\n\
}\n\n");
fprintf(fp, "void %s::freeProgram(){\n", classname.c_str());
- fprintf(fp,
+ fprintf(fp,
" \n\
\tauto it = m_programs.begin(); \n\
\tm_lock.lock();\n\
m_shaders.insert({var_shader, 1});\n\
m_lock.unlock();\n");
} else if(strcmp(e->name().c_str(), "glDeleteProgram") == 0){
- fprintf(fp,
+ fprintf(fp,
"\t\t\tm_lock.lock(); \n"
"\t\t\tauto pro = m_programs.find(var_program); \n"
"\t\t\tif (pro != m_programs.end()) \n"
"\t\t\t}\n"
"\t\t\tm_lock.unlock();\n");
} else if(strcmp(e->name().c_str(), "glDeleteShader") == 0){
- fprintf(fp,
+ fprintf(fp,
"\t\t\tm_lock.lock(); \n\
\t\t\tauto shader = m_shaders.find(var_shader); \n\
\t\t\tif (shader != m_shaders.end()) \n\
if (!parseTypeDeclaration(field, &retTypeName, &error)) {
fprintf(stderr,
"line: %d: Parsing error in field <%s>: %s\n",
- lc,
- field.c_str(),
+ lc,
+ field.c_str(),
error.c_str());
return false;
}
file has the following format:
[prefix](retvalType, FuncName, <param type> [param name],...)
-where
+where
retvalType - The function return value type
FuncName - function name
<param type> mandatory parameter type
[param name] - optional parameter name
Examples:
-GL_ENTRY(void, glVertex1f, float v)
+GL_ENTRY(void, glVertex1f, float v)
XXX(int *, foo, int n, float, short)
XXX(void, glFlush, void)
Note: Empty lines in the file are ignored. A line that starts with # is a comment
-2. basename.attrib - Attributes information of the API.
+2. basename.attrib - Attributes information of the API.
This file includes additional flags, pointer datalen information and
global attributes of the protocol. For the up-to-date format of the file,
please refer to the specification file in the project source
GLint* 32 %p true
GLptr 32 %p true
-Encoder generated code files
+Encoder generated code files
----------------------------
In order to generate the encoder files, one should run the ‘emugen’
tool as follows:
the encoder context. For example, such a callback could fetch the
context from a Thread Local Storage (TLS) location.
-api_client_proc.h - type definitions for the protocol procedures.
+api_client_proc.h - type definitions for the protocol procedures.
api_client_context.h - defines the client side dispatch table data
structure that stores the encoding functions. This data structure also
‘client_context’ class above and adds encoding and streaming
functionality.
-api_enc.cpp - Encoder implementation.
+api_enc.cpp - Encoder implementation.
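
To make the mapping concrete, here is an illustrative sketch (not actual
generated output) of what these pieces look like for the hypothetical 'foo'
API used later in this document; the member names are assumptions:

    // api_client_proc.h - one typedef per protocol procedure.
    typedef void (foo_APIENTRY *fooAlphaFunc_client_proc_t) (void * ctx, FooInt, FooFloat);

    // api_client_context.h - the dispatch table holding the encoding functions.
    struct foo_client_context_t {
        fooAlphaFunc_client_proc_t fooAlphaFunc;
        // ... one pointer per API entry point ...
    };

    // api_enc.h / api_enc.cpp - the encoder class derives from the client
    // context above and implements each entry by packing the call into the
    // stream.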
Decoder generated files
-----------------------
to the codec, out from data that returns from the codec.
format: dir <varname> <[in | out | inout]>
- var_flag
+ var_flag
description : set variable flags
format: var_flag <varname> < nullAllowed | isLarge | ... >
isLarge -> for pointer variables, indicates that the data should be sent without an intermediate copy
flag
- description: set entry point flag;
+ description: set entry point flag;
format: flag < unsupported | ... >
supported flags are:
-      unsupported - The encoder side implementation is pointed to "unsupported reporting function".
+      unsupported - The encoder side implementation is pointed to "unsupported reporting function".
custom_decoder - The decoder is expected to be provided with
custom implementation. The call to the
                     decoder function includes a pointer to the
size_t foo_decoder_context_t::decode(void *buf, size_t len, IOStream *stream)
{
-
+
size_t pos = 0;
- if (len < 8) return pos;
+ if (len < 8) return pos;
unsigned char *ptr = (unsigned char *)buf;
- bool unknownOpcode = false;
-#ifdef CHECK_GL_ERROR
- char lastCall[256] = {0};
-#endif
- while ((len - pos >= 8) && !unknownOpcode) {
- uint32_t opcode = *(uint32_t *)ptr;
+ bool unknownOpcode = false;
+#ifdef CHECK_GL_ERROR
+ char lastCall[256] = {0};
+#endif
+ while ((len - pos >= 8) && !unknownOpcode) {
+ uint32_t opcode = *(uint32_t *)ptr;
size_t packetLen = *(uint32_t *)(ptr + 4);
- if (len - pos < packetLen) return pos;
+ if (len - pos < packetLen) return pos;
bool useChecksum = ChecksumCalculatorThreadInfo::getVersion() > 0;
size_t checksumSize = 0;
if (useChecksum) {
FooInt var_func = Unpack<FooInt,uint32_t>(ptr + 8);
FooFloat var_ref = Unpack<FooFloat,uint32_t>(ptr + 8 + 4);
if (useChecksum) {
- ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + 4, ptr + 8 + 4 + 4, checksumSize,
+ ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + 4, ptr + 8 + 4 + 4, checksumSize,
"8 + 4 + 4::decode, OP_foo_decoder_context_t: GL checksumCalculator failure\n");
}
DEBUG("foo(%p): fooAlphaFunc(%d %f )\n", stream,var_func, var_ref);
uint32_t size_stuff __attribute__((unused)) = Unpack<uint32_t,uint32_t>(ptr + 8);
InputBuffer inptr_stuff(ptr + 8 + 4, size_stuff);
if (useChecksum) {
- ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + size_stuff, ptr + 8 + 4 + size_stuff, checksumSize,
+ ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + size_stuff, ptr + 8 + 4 + size_stuff, checksumSize,
"8 + 4 + size_stuff::decode, OP_foo_decoder_context_t: GL checksumCalculator failure\n");
}
size_t totalTmpSize = sizeof(FooBoolean);
uint32_t size_params __attribute__((unused)) = Unpack<uint32_t,uint32_t>(ptr + 8);
InputBuffer inptr_params(ptr + 8 + 4, size_params);
if (useChecksum) {
- ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + size_params, ptr + 8 + 4 + size_params, checksumSize,
+ ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + size_params, ptr + 8 + 4 + size_params, checksumSize,
"8 + 4 + size_params::decode, OP_foo_decoder_context_t: GL checksumCalculator failure\n");
}
DEBUG("foo(%p): fooUnsupported(%p(%u) )\n", stream,(void*)(inptr_params.get()), size_params);
case OP_fooDoEncoderFlush: {
FooInt var_param = Unpack<FooInt,uint32_t>(ptr + 8);
if (useChecksum) {
- ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4, ptr + 8 + 4, checksumSize,
+ ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4, ptr + 8 + 4, checksumSize,
"8 + 4::decode, OP_foo_decoder_context_t: GL checksumCalculator failure\n");
}
DEBUG("foo(%p): fooDoEncoderFlush(%d )\n", stream,var_param);
uint32_t size_param __attribute__((unused)) = Unpack<uint32_t,uint32_t>(ptr + 8);
InputBuffer inptr_param(ptr + 8 + 4, size_param);
if (useChecksum) {
- ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + size_param, ptr + 8 + 4 + size_param, checksumSize,
+ ChecksumCalculatorThreadInfo::validOrDie(ptr, 8 + 4 + size_param, ptr + 8 + 4 + size_param, checksumSize,
"8 + 4 + size_param::decode, OP_foo_decoder_context_t: GL checksumCalculator failure\n");
}
DEBUG("foo(%p): fooTakeConstVoidPtrConstPtr(%p(%u) )\n", stream,(const void* const*)(inptr_param.get()), size_param);
#ifndef GUARD_foo_decoder_context_t
#define GUARD_foo_decoder_context_t
-#include "IOStream.h"
+#include "IOStream.h"
#include "foo_server_context.h"
#include "emugl/common/logging.h"
#ifndef foo_APIENTRY
-#define foo_APIENTRY
+#define foo_APIENTRY
#endif
typedef void (foo_APIENTRY *fooAlphaFunc_server_proc_t) (FooInt, FooFloat);
typedef FooBoolean (foo_APIENTRY *fooIsBuffer_server_proc_t) (void*);
#include "emugl/common/logging.h"
#ifndef foo_APIENTRY
-#define foo_APIENTRY
+#define foo_APIENTRY
#endif
typedef void (foo_APIENTRY *fooAlphaFunc_client_proc_t) (void * ctx, FooInt, FooFloat);
typedef FooBoolean (foo_APIENTRY *fooIsBuffer_client_proc_t) (void * ctx, void*);
#include "emugl/common/logging.h"
#ifndef foo_APIENTRY
-#define foo_APIENTRY
+#define foo_APIENTRY
#endif
typedef void (foo_APIENTRY *fooAlphaFunc_wrapper_proc_t) (FooInt, FooFloat);
typedef FooBoolean (foo_APIENTRY *fooIsBuffer_wrapper_proc_t) (void*);
)
if(BACKWARD_HAS_EXTERNAL_LIBRARIES)
set_target_properties(Backward::Backward PROPERTIES
- INTERFACE_LINK_LIBRARIES "${BACKWARD_LIBRARIES}"
+ INTERFACE_LINK_LIBRARIES "${BACKWARD_LIBRARIES}"
)
endif()
endif()
FILE* print(StackTrace& st, FILE* fp = stderr);
// Resolve and print a stack trace to the given std::ostream object.
- // Color will only be used if color_mode is set to always.
+ // Color will only be used if color_mode is set to always.
template <typename ST>
std::ostream& print(ST& st, std::ostream& os);
```
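
A minimal usage sketch of the `print` overloads listed above, assuming `backward.hpp` is on the include path and the trace is captured with `StackTrace::load_here()`:

```
#include <iostream>
#include "backward.hpp"

int main() {
    backward::StackTrace st;
    st.load_here(32);             // capture up to 32 frames from here

    backward::Printer printer;
    printer.print(st, std::cerr); // resolve and print to the given ostream
    return 0;
}
```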
QEMU_ARCH=aarch64
}
-# Toolchains for little-endian, hard-float, 32-bit ARMv7 (and earlier) for GNU/Linux systems
+# Toolchains for little-endian, hard-float, 32-bit ARMv7 (and earlier) for GNU/Linux systems
function set_arm-linux-gnueabihf() {
TOOLCHAIN=LINARO
TARGET=arm-linux-gnueabihf
DisableHardwareCapabilities();
auto& fs = GetEmptyFilesystem();
fs.CreateFile("/proc/cpuinfo",
- R"(Processor : ARMv6-compatible processor rev 6 (v6l)
-BogoMIPS : 199.47
-Features : swp half thumb fastmult vfp edsp java
-CPU implementer : 0x41
-CPU architecture: 7
-CPU variant : 0x0
-CPU part : 0xb76
-CPU revision : 6
-
-Hardware : SPICA
-Revision : 0020
+ R"(Processor : ARMv6-compatible processor rev 6 (v6l)
+BogoMIPS : 199.47
+Features : swp half thumb fastmult vfp edsp java
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xb76
+CPU revision : 6
+
+Hardware : SPICA
+Revision : 0020
Serial : 33323613546d00ec )");
const auto info = GetArmInfo();
EXPECT_EQ(info.architecture, 6);
*/
class Connection
{
-public:
+public:
typedef std::function<void(const std::function<void()>&)> Dispatcher;
/**
process-cpp
STATIC
-
+
core/posix/backtrace.h
core/posix/backtrace.cpp
)
add_subdirectory(linux)
-
\ No newline at end of file
posix-process
)
-
\ No newline at end of file
auto it = ::environ;
while (it != nullptr && *it != nullptr)
{
- std::string line(*it);
+ std::string line(*it);
functor(line.substr(0,line.find_first_of('=')),
line.substr(line.find_first_of('=')+1));
++it;
-# We have to manually alter the cxx flags to have a working
-# travis-ci build. Its container-based infrastructure only features
+# We have to manually alter the cxx flags to have a working
+# travis-ci build. Its container-based infrastructure only features
# a very old cmake that does not support the more current:
# set_property(TARGET xdg_test PROPERTY CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
// Copyright (C) 2015 Thomas Voß <thomas.voss.bochum@gmail.com>
-//
+//
// This library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
sources="$sources source$sourceid"
;;
:)
- echo "Option -$OPTARG requires an argument"
+ echo "Option -$OPTARG requires an argument"
usage
exit 1
;;
exit 0
;;
\?)
- echo "Invalid option: -$OPTARG"
+ echo "Invalid option: -$OPTARG"
usage
exit 1
;;
echo "creating phablet-compatible $arch partial chroot for anbox compilation in directory ${directory}"
if [ ! -d ${directory} ]; then
- mkdir -p ${directory}
+ mkdir -p ${directory}
fi
DEBCONTROL=$(pwd)/../debian/control
# dpkg-checkbuilddeps returns non-zero when dependencies are not met and the list is sent to stderr
builddeps=$(dpkg-checkbuilddeps -a ${arch} --admindir=. ${DEBCONTROL} 2>&1 )
if [ $? -eq 0 ] ; then
- exit 0
+ exit 0
fi
echo "${builddeps}"
" >> mstrap.conf
done
-multistrap -f mstrap.conf
+multistrap -f mstrap.conf
rm -f var/cache/apt/archives/lock
ln -sf $(pwd)$(readlink ${broken_symlink}) ${broken_symlink}
done
-popd > /dev/null
+popd > /dev/null
// Intel Core i7 Q720
"Q 720",
// Intel Pentium T4500
- "T4500",
+ "T4500",
// Intel Core i7 Q720
"Q 720",
// Intel Xeon E5520
renderer->bindColorBufferToRenderbuffer(colorBuffer);
}
-static EGLint rcColorBufferCacheFlush(uint32_t, EGLint,
+static EGLint rcColorBufferCacheFlush(uint32_t, EGLint,
int) {
// XXX: TBD - should be implemented
return 0;
}
if (mouse_events.size() > 0) {
- mouse_events.push_back({EV_SYN, SYN_REPORT, 0});
+ mouse_events.push_back({EV_SYN, SYN_REPORT, 0});
pointer_->send_events(mouse_events);
}
void Platform::push_finger_up(int finger_id, std::vector<input::Event> &touch_events){
int slot = find_touch_slot(finger_id);
- if (slot == -1)
+ if (slot == -1)
return;
push_slot(touch_events, slot);
touch_events.push_back({EV_ABS, ABS_MT_TRACKING_ID, -1});
void Platform::push_finger_motion(int x, int y, int finger_id, std::vector<input::Event> &touch_events){
int slot = find_touch_slot(finger_id);
- if (slot == -1)
+ if (slot == -1)
return;
push_slot(touch_events, slot);
touch_events.push_back({EV_ABS, ABS_MT_POSITION_X, x});
Runtime::Runtime(std::uint32_t pool_size)
: pool_size_{pool_size},
-
+
#if BOOST_VERSION >= 106600
service_{static_cast<int>(pool_size_)},
#else
service_{pool_size_},
#endif
-
+
strand_{service_},
keep_alive_{service_} {}
.. _SEBA: https://wiki.opencord.org/display/CORD/SEBA
SDN-Enabled Broadband Access (SEBA) is an Exemplar Platform being built by the ONF and CORD community,
-which would also be a sample use case of IEC.
+which would also be a sample use case of IEC.
We are working to enable SEBA on Arm using the native SEBA installation method.
RUN pip install virtualenv
RUN apt-mark hold kubectl && apt autoremove -y \
&& rm -rf /var/lib/apt/lists/*
-RUN groupadd -r $TEST_USER && useradd -m -s /bin/bash -g $TEST_USER \
- --system -G sudo -p $(openssl passwd -1 $TEST_USER) $TEST_USER
+RUN groupadd -r $TEST_USER && useradd -m -s /bin/bash -g $TEST_USER \
+ --system -G sudo -p $(openssl passwd -1 $TEST_USER) $TEST_USER
RUN echo "$TEST_USER\tALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/90-docker-users
USER $TEST_USER
--- a/templates/Dockerfile.j2
+++ b/templates/Dockerfile.j2
@@ -24,8 +24,8 @@ FROM centos:7
-
+
RUN yum update -y && yum install -y curl && yum clean all
-
+
-COPY --from=golang /go/src/github.com/alexandruavadanii/beats/{{ beat }}/build/distributions/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz /tmp
-RUN tar zxf /tmp/FIXME -C /tmp && \
+COPY --from=golang /go/src/github.com/elastic/beats/{{ beat }}/build/distributions/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz /tmp
+RUN tar zxf /tmp/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz -C /tmp && \
mv /tmp/{{ beat }}-{{ elastic_version }}-linux-arm64 {{ beat_home }} && \
rm /tmp/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz
-
---
+
+--
2.17.1
+++ b/src/test/cord-api/Dockerfile.k8s-api-tester
@@ -19,7 +19,7 @@
FROM python:2.7
-
+
RUN pip install httpie robotframework robotframework-requests pexpect \
- robotframework-sshlibrary robotframework-httplibrary robotframework-kafkalibrary pygments pyyaml tinydb && \
+ robotframework-sshlibrary robotframework-httplibrary robotframework-kafkalibrary pygments pyyaml tinydb pynacl==1.1.2 && \
pip install -U requests && rm -rf /var/lib/apt/lists/*
-
+
RUN mkdir -p /src/cord-api
---
+--
2.17.1
--- a/Makefile
+++ b/Makefile
@@ -55,7 +55,7 @@ release: promu github-release
-
+
promu:
@GOOS=$(shell uname -s | tr A-Z a-z) \
- GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
+ GOARCH=$(subst aarch64,arm64,$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m)))) \
$(GO) get -u github.com/prometheus/promu
-
+
github-release:
---
+--
2.17.1
--- a/kafka/Dockerfile
+++ b/kafka/Dockerfile
@@ -5,7 +5,7 @@ ENV kafka_bin_version=2.12-$kafka_version
-
+
RUN apk add --no-cache --update-cache --virtual build-dependencies curl ca-certificates \
&& mkdir -p /opt/kafka \
- && curl -SLs "https://www-eu.apache.org/dist/kafka/$kafka_version/kafka_$kafka_bin_version.tgz" | tar -xzf - --strip-components=1 -C /opt/kafka \
+ && curl -SLs "https://archive.apache.org/dist/kafka/$kafka_version/kafka_$kafka_bin_version.tgz" |tar -xzf - --strip-components=1 -C /opt/kafka \
&& apk del build-dependencies \
&& rm -rf /var/cache/apk/*
-
---
+
+--
2.17.1
+++ b/Dockerfile
@@ -1,13 +1,11 @@
FROM alpine:3.6
-
+
-LABEL MAINTAINER="Sergii Nuzhdin <ipaq.lw@gmail.com@gmail.com>"
-
-ENV KUBE_LATEST_VERSION="v1.13.0"
+ENV KUBE_LATEST_VERSION="v1.6.4"
-
+
RUN apk add --update ca-certificates \
&& apk add --update -t deps curl \
&& apk add --update gettext \
&& chmod +x /usr/local/bin/kubectl \
&& apk del --purge deps \
&& rm /var/cache/apk/*
---
+--
2.17.1
+++ b/Dockerfile.synchronizer
@@ -16,7 +16,7 @@
# docker build -t xosproject/rcord-synchronizer:candidate -f Dockerfile.synchronizer .
-
+
# xosproject/rcord-synchronizer
-FROM xosproject/xos-synchronizer-base:2.1.25
+FROM cachengo/xos-synchronizer-base:2.1.38
-
+
COPY xos/synchronizer /opt/xos/synchronizers/rcord
COPY VERSION /opt/xos/synchronizers/rcord/
---
+--
2.17.1
packaging==17.1
pexpect==4.6.0
+pynacl==1.1.2
-
+
# python-consul>=0.6.1 we need the pre-released version for now, because 0.6.1 does not
# yet support Twisted. Once this is released, it will be the 0.6.2 version
---
+--
2.17.1
--- a/Makefile
+++ b/Makefile
@@ -237,7 +237,7 @@ jenkins-containers: base voltha ofagent netconf consul cli envoy fluentd unum j2
-
+
prod-containers: base voltha ofagent netconf shovel onos dashd cli grafana consul tools envoy fluentd unum j2
-
+
-seba-containers: base voltha ofagent netconf shovel onos tester config-push dashd cli portainer envoy alarm-generator test_runner
+seba-containers: base voltha ofagent netconf shovel onos tester config-push dashd cli portainer envoy alarm-generator ponsim test_runner
-
+
containers: base voltha ofagent netconf shovel onos tester config-push dashd cli portainer grafana nginx consul tools envoy fluentd unum ponsim j2 alarm-generator test_runner
-
---
+
+--
2.17.1